^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Driver for Atmel AT32 and AT91 SPI Controllers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2006 Atmel Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/dmaengine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/spi/spi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/gpio/consumer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/pinctrl/consumer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <trace/events/spi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) /* SPI register offsets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #define SPI_CR 0x0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define SPI_MR 0x0004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define SPI_RDR 0x0008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #define SPI_TDR 0x000c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define SPI_SR 0x0010
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #define SPI_IER 0x0014
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define SPI_IDR 0x0018
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define SPI_IMR 0x001c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define SPI_CSR0 0x0030
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define SPI_CSR1 0x0034
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define SPI_CSR2 0x0038
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define SPI_CSR3 0x003c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define SPI_FMR 0x0040
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #define SPI_FLR 0x0044
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define SPI_VERSION 0x00fc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define SPI_RPR 0x0100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #define SPI_RCR 0x0104
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define SPI_TPR 0x0108
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #define SPI_TCR 0x010c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define SPI_RNPR 0x0110
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define SPI_RNCR 0x0114
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define SPI_TNPR 0x0118
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define SPI_TNCR 0x011c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define SPI_PTCR 0x0120
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define SPI_PTSR 0x0124
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) /* Bitfields in CR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define SPI_SPIEN_OFFSET 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #define SPI_SPIEN_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #define SPI_SPIDIS_OFFSET 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #define SPI_SPIDIS_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #define SPI_SWRST_OFFSET 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #define SPI_SWRST_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #define SPI_LASTXFER_OFFSET 24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #define SPI_LASTXFER_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #define SPI_TXFCLR_OFFSET 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #define SPI_TXFCLR_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define SPI_RXFCLR_OFFSET 17
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #define SPI_RXFCLR_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #define SPI_FIFOEN_OFFSET 30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #define SPI_FIFOEN_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #define SPI_FIFODIS_OFFSET 31
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #define SPI_FIFODIS_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) /* Bitfields in MR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) #define SPI_MSTR_OFFSET 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) #define SPI_MSTR_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) #define SPI_PS_OFFSET 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #define SPI_PS_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) #define SPI_PCSDEC_OFFSET 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) #define SPI_PCSDEC_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) #define SPI_FDIV_OFFSET 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) #define SPI_FDIV_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #define SPI_MODFDIS_OFFSET 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) #define SPI_MODFDIS_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #define SPI_WDRBT_OFFSET 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) #define SPI_WDRBT_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) #define SPI_LLB_OFFSET 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) #define SPI_LLB_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) #define SPI_PCS_OFFSET 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) #define SPI_PCS_SIZE 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) #define SPI_DLYBCS_OFFSET 24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) #define SPI_DLYBCS_SIZE 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) /* Bitfields in RDR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) #define SPI_RD_OFFSET 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) #define SPI_RD_SIZE 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) /* Bitfields in TDR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) #define SPI_TD_OFFSET 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) #define SPI_TD_SIZE 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) /* Bitfields in SR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) #define SPI_RDRF_OFFSET 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) #define SPI_RDRF_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) #define SPI_TDRE_OFFSET 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) #define SPI_TDRE_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) #define SPI_MODF_OFFSET 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #define SPI_MODF_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) #define SPI_OVRES_OFFSET 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) #define SPI_OVRES_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) #define SPI_ENDRX_OFFSET 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) #define SPI_ENDRX_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) #define SPI_ENDTX_OFFSET 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) #define SPI_ENDTX_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) #define SPI_RXBUFF_OFFSET 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) #define SPI_RXBUFF_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) #define SPI_TXBUFE_OFFSET 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) #define SPI_TXBUFE_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) #define SPI_NSSR_OFFSET 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) #define SPI_NSSR_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) #define SPI_TXEMPTY_OFFSET 9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) #define SPI_TXEMPTY_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) #define SPI_SPIENS_OFFSET 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) #define SPI_SPIENS_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) #define SPI_TXFEF_OFFSET 24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) #define SPI_TXFEF_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) #define SPI_TXFFF_OFFSET 25
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) #define SPI_TXFFF_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) #define SPI_TXFTHF_OFFSET 26
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) #define SPI_TXFTHF_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) #define SPI_RXFEF_OFFSET 27
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) #define SPI_RXFEF_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) #define SPI_RXFFF_OFFSET 28
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) #define SPI_RXFFF_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) #define SPI_RXFTHF_OFFSET 29
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) #define SPI_RXFTHF_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) #define SPI_TXFPTEF_OFFSET 30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) #define SPI_TXFPTEF_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) #define SPI_RXFPTEF_OFFSET 31
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) #define SPI_RXFPTEF_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) /* Bitfields in CSR0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) #define SPI_CPOL_OFFSET 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) #define SPI_CPOL_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) #define SPI_NCPHA_OFFSET 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) #define SPI_NCPHA_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) #define SPI_CSAAT_OFFSET 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) #define SPI_CSAAT_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) #define SPI_BITS_OFFSET 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) #define SPI_BITS_SIZE 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) #define SPI_SCBR_OFFSET 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) #define SPI_SCBR_SIZE 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) #define SPI_DLYBS_OFFSET 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) #define SPI_DLYBS_SIZE 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) #define SPI_DLYBCT_OFFSET 24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) #define SPI_DLYBCT_SIZE 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) /* Bitfields in RCR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) #define SPI_RXCTR_OFFSET 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) #define SPI_RXCTR_SIZE 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) /* Bitfields in TCR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) #define SPI_TXCTR_OFFSET 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) #define SPI_TXCTR_SIZE 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) /* Bitfields in RNCR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) #define SPI_RXNCR_OFFSET 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) #define SPI_RXNCR_SIZE 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) /* Bitfields in TNCR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) #define SPI_TXNCR_OFFSET 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) #define SPI_TXNCR_SIZE 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) /* Bitfields in PTCR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) #define SPI_RXTEN_OFFSET 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) #define SPI_RXTEN_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) #define SPI_RXTDIS_OFFSET 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) #define SPI_RXTDIS_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) #define SPI_TXTEN_OFFSET 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) #define SPI_TXTEN_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) #define SPI_TXTDIS_OFFSET 9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) #define SPI_TXTDIS_SIZE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) /* Bitfields in FMR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) #define SPI_TXRDYM_OFFSET 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) #define SPI_TXRDYM_SIZE 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) #define SPI_RXRDYM_OFFSET 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) #define SPI_RXRDYM_SIZE 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) #define SPI_TXFTHRES_OFFSET 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) #define SPI_TXFTHRES_SIZE 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) #define SPI_RXFTHRES_OFFSET 24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) #define SPI_RXFTHRES_SIZE 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) /* Bitfields in FLR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) #define SPI_TXFL_OFFSET 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) #define SPI_TXFL_SIZE 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) #define SPI_RXFL_OFFSET 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) #define SPI_RXFL_SIZE 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) /* Constants for BITS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) #define SPI_BITS_8_BPT 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) #define SPI_BITS_9_BPT 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) #define SPI_BITS_10_BPT 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) #define SPI_BITS_11_BPT 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) #define SPI_BITS_12_BPT 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) #define SPI_BITS_13_BPT 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) #define SPI_BITS_14_BPT 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) #define SPI_BITS_15_BPT 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) #define SPI_BITS_16_BPT 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) #define SPI_ONE_DATA 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) #define SPI_TWO_DATA 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) #define SPI_FOUR_DATA 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
/*
 * Bit manipulation macros: each register field "name" is described by a
 * pair of SPI_<name>_OFFSET / SPI_<name>_SIZE constants defined above.
 */
/* Mask with only bit "name" set (for single-bit fields) */
#define SPI_BIT(name) \
	(1 << SPI_##name##_OFFSET)
/* Build field "name" from "value": mask to field width, shift into place */
#define SPI_BF(name, value) \
	(((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
/* Extract field "name" from a register image "value" */
#define SPI_BFEXT(name, value) \
	(((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
/* Return register image "old" with field "name" replaced by "value" */
#define SPI_BFINS(name, value, old) \
	(((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
	| SPI_BF(name, value))

/* Register access macros ("reg" is the suffix of an SPI_* offset above;
 * *_relaxed accessors imply no memory barriers)
 */
#define spi_readl(port, reg) \
	readl_relaxed((port)->regs + SPI_##reg)
#define spi_writel(port, reg, value) \
	writel_relaxed((value), (port)->regs + SPI_##reg)
#define spi_writew(port, reg, value) \
	writew_relaxed((value), (port)->regs + SPI_##reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
/* Use PIO for small transfers, avoiding DMA setup/teardown overhead and
 * cache operations; a better heuristic would also consider wordsize and
 * bitrate.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) #define DMA_MIN_BYTES 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) #define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) #define AUTOSUSPEND_TIMEOUT 2000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)
/* Feature flags describing what a given controller revision supports */
struct atmel_spi_caps {
	bool	is_spi2;		/* v2 IP: LASTXFER, CSAAT, etc. (see below) */
	bool	has_wdrbt;		/* MR.WDRBT: wait for RDR read before transfer */
	bool	has_dma_support;	/* can use dmaengine channels */
	bool	has_pdc_support;	/* has the built-in peripheral DMA controller */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) * The core SPI transfer engine just talks to a register bank to set up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) * DMA transfers; transfer queue progress is driven by IRQs. The clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) * framework provides the base clock, subdivided for each spi_device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) */
struct atmel_spi {
	spinlock_t		lock;	/* serializes controller access */
	unsigned long		flags;	/* IRQ state saved by atmel_spi_lock() */

	phys_addr_t		phybase;	/* physical base, for DMA slave addresses */
	void __iomem		*regs;		/* mapped register base (spi_readl/writel) */
	int			irq;
	struct clk		*clk;
	struct platform_device	*pdev;
	unsigned long		spi_clk;	/* presumably cached clk rate in Hz — TODO confirm */

	struct spi_transfer	*current_transfer;	/* transfer in flight, if any */
	int			current_remaining_bytes;
	int			done_status;		/* 0 or negative errno for current message */
	/* bounce buffers, used when the xfer buffers are vmalloc'ed */
	dma_addr_t		dma_addr_rx_bbuf;
	dma_addr_t		dma_addr_tx_bbuf;
	void			*addr_rx_bbuf;
	void			*addr_tx_bbuf;

	struct completion	xfer_completion;	/* signaled when a transfer completes */

	struct atmel_spi_caps	caps;	/* revision-dependent features */

	bool			use_dma;	/* dmaengine channels available */
	bool			use_pdc;	/* peripheral DMA controller in use */

	bool			keep_cs;	/* leave CS asserted after message — TODO confirm */

	u32			fifo_size;	/* 0 when the IP has no FIFOs */
	/* NOTE(review): presumably tracks which native CS lines are unused,
	 * and which one is driven while a GPIO CS is in use — confirm against
	 * the setup code. */
	u8			native_cs_free;
	u8			native_cs_for_gpio;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
/* Controller-specific per-slave state */
struct atmel_spi_device {
	u32	csr;	/* CSRx image programmed when this slave's CS is activated */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) #define SPI_MAX_DMA_XFER 65535 /* true for both PDC and DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) #define INVALID_DMA_ADDRESS 0xffffffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) * Version 2 of the SPI controller has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) * - CR.LASTXFER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) * - SPI_MR.DIV32 may become FDIV or must-be-zero (here: always zero)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) * - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) * - SPI_CSRx.CSAAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) * - SPI_CSRx.SBCR allows faster clocking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) */
/* True when the controller is a version 2 IP (feature list above). */
static bool atmel_spi_is_v2(struct atmel_spi *as)
{
	return as->caps.is_spi2;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) * they assume that spi slave device state will not change on deselect, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) * that automagic deselection is OK. ("NPCSx rises if no data is to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) * transmitted") Not so! Workaround uses nCSx pins as GPIOs; or newer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) * controllers have CSAAT and friends.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) *
 * Even on controllers newer than at91rm9200, using GPIOs can make sense as
 * it lets us support active-high chipselects despite the controller's
 * belief that only active-low devices/systems exist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) * However, at91rm9200 has a second erratum whereby nCS0 doesn't work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) * right when driven with GPIO. ("Mode Fault does not allow more than one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) * Master on Chip Select 0.") No workaround exists for that ... so for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) * and (c) will trigger that first erratum in some cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
/*
 * Assert the chip select for @spi: program the slave's CSR image, point
 * MR.PCS at the chosen chip select, and raise the CS GPIO when one is used.
 * When a GPIO drives the actual CS pin, a native chip select
 * (as->native_cs_for_gpio) is still programmed so the controller clocks data.
 */
static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
{
	struct atmel_spi_device *asd = spi->controller_state;
	int chip_select;
	u32 mr;

	if (spi->cs_gpiod)
		chip_select = as->native_cs_for_gpio;
	else
		chip_select = spi->chip_select;

	if (atmel_spi_is_v2(as)) {
		spi_writel(as, CSR0 + 4 * chip_select, asd->csr);
		/* For the low SPI version, there is an issue that PDC transfer
		 * on CS1,2,3 needs SPI_CSR0.BITS config as SPI_CSR1,2,3.BITS
		 * — hence the CSR image is mirrored into CSR0 as well.
		 */
		spi_writel(as, CSR0, asd->csr);
		if (as->caps.has_wdrbt) {
			/* WDRBT: block the next transfer until RDR has been
			 * read, preventing receive overruns. */
			spi_writel(as, MR,
					SPI_BF(PCS, ~(0x01 << chip_select))
					| SPI_BIT(WDRBT)
					| SPI_BIT(MODFDIS)
					| SPI_BIT(MSTR));
		} else {
			spi_writel(as, MR,
					SPI_BF(PCS, ~(0x01 << chip_select))
					| SPI_BIT(MODFDIS)
					| SPI_BIT(MSTR));
		}

		mr = spi_readl(as, MR);
		if (spi->cs_gpiod)
			gpiod_set_value(spi->cs_gpiod, 1);
	} else {
		u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
		int i;
		u32 csr;

		/* Make sure clock polarity is correct on every CSR: on these
		 * older IPs CPOL must match before the CS is driven. */
		for (i = 0; i < spi->master->num_chipselect; i++) {
			csr = spi_readl(as, CSR0 + 4 * i);
			if ((csr ^ cpol) & SPI_BIT(CPOL))
				spi_writel(as, CSR0 + 4 * i,
						csr ^ SPI_BIT(CPOL));
		}

		mr = spi_readl(as, MR);
		/* PCS is low-active: clear the selected chip's bit */
		mr = SPI_BFINS(PCS, ~(1 << chip_select), mr);
		if (spi->cs_gpiod)
			gpiod_set_value(spi->cs_gpiod, 1);
		spi_writel(as, MR, mr);
	}

	dev_dbg(&spi->dev, "activate NPCS, mr %08x\n", mr);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) int chip_select;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) u32 mr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) if (spi->cs_gpiod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) chip_select = as->native_cs_for_gpio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) chip_select = spi->chip_select;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) /* only deactivate *this* device; sometimes transfers to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) * another device may be active when this routine is called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) mr = spi_readl(as, MR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) if (~SPI_BFEXT(PCS, mr) & (1 << chip_select)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) mr = SPI_BFINS(PCS, 0xf, mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) spi_writel(as, MR, mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) dev_dbg(&spi->dev, "DEactivate NPCS, mr %08x\n", mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) if (!spi->cs_gpiod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) spi_writel(as, CR, SPI_BIT(LASTXFER));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) gpiod_set_value(spi->cs_gpiod, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
/*
 * Take the controller lock with local IRQs disabled. The saved IRQ state
 * lives in as->flags, so lock/unlock must pair up and must not nest.
 */
static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock)
{
	spin_lock_irqsave(&as->lock, as->flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)
/* Release the controller lock and restore the IRQ state saved in as->flags. */
static void atmel_spi_unlock(struct atmel_spi *as) __releases(&as->lock)
{
	spin_unlock_irqrestore(&as->lock, as->flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) static inline bool atmel_spi_is_vmalloc_xfer(struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) return is_vmalloc_addr(xfer->tx_buf) || is_vmalloc_addr(xfer->rx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) static inline bool atmel_spi_use_dma(struct atmel_spi *as,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) return as->use_dma && xfer->len >= DMA_MIN_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) static bool atmel_spi_can_dma(struct spi_master *master,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) struct spi_device *spi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) struct atmel_spi *as = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) return atmel_spi_use_dma(as, xfer) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) !atmel_spi_is_vmalloc_xfer(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) return atmel_spi_use_dma(as, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) static int atmel_spi_dma_slave_config(struct atmel_spi *as,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) struct dma_slave_config *slave_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) u8 bits_per_word)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) struct spi_master *master = platform_get_drvdata(as->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) if (bits_per_word > 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) slave_config->dst_addr = (dma_addr_t)as->phybase + SPI_TDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) slave_config->src_addr = (dma_addr_t)as->phybase + SPI_RDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) slave_config->src_maxburst = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) slave_config->dst_maxburst = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) slave_config->device_fc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) * This driver uses fixed peripheral select mode (PS bit set to '0' in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) * the Mode Register).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) * So according to the datasheet, when FIFOs are available (and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) * enabled), the Transmit FIFO operates in Multiple Data Mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) * In this mode, up to 2 data, not 4, can be written into the Transmit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) * Data Register in a single access.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) * However, the first data has to be written into the lowest 16 bits and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) * the second data into the highest 16 bits of the Transmit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) * Data Register. For 8bit data (the most frequent case), it would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) * require to rework tx_buf so each data would actualy fit 16 bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) * So we'd rather write only one data at the time. Hence the transmit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) * path works the same whether FIFOs are available (and enabled) or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) slave_config->direction = DMA_MEM_TO_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) if (dmaengine_slave_config(master->dma_tx, slave_config)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) dev_err(&as->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) "failed to configure tx dma channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) * This driver configures the spi controller for master mode (MSTR bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) * set to '1' in the Mode Register).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) * So according to the datasheet, when FIFOs are available (and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) * enabled), the Receive FIFO operates in Single Data Mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) * So the receive path works the same whether FIFOs are available (and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) * enabled) or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) slave_config->direction = DMA_DEV_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) if (dmaengine_slave_config(master->dma_rx, slave_config)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) dev_err(&as->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) "failed to configure rx dma channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) static int atmel_spi_configure_dma(struct spi_master *master,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) struct atmel_spi *as)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) struct dma_slave_config slave_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) struct device *dev = &as->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) dma_cap_mask_t mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) dma_cap_zero(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) dma_cap_set(DMA_SLAVE, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) master->dma_tx = dma_request_chan(dev, "tx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) if (IS_ERR(master->dma_tx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) err = dev_err_probe(dev, PTR_ERR(master->dma_tx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) "No TX DMA channel, DMA is disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) goto error_clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) master->dma_rx = dma_request_chan(dev, "rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) if (IS_ERR(master->dma_rx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) err = PTR_ERR(master->dma_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) * No reason to check EPROBE_DEFER here since we have already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) * requested tx channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) dev_err(dev, "No RX DMA channel, DMA is disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) err = atmel_spi_dma_slave_config(as, &slave_config, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) dev_info(&as->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) "Using %s (tx) and %s (rx) for DMA transfers\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) dma_chan_name(master->dma_tx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) dma_chan_name(master->dma_rx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) if (!IS_ERR(master->dma_rx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) dma_release_channel(master->dma_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) if (!IS_ERR(master->dma_tx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) dma_release_channel(master->dma_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) error_clear:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) master->dma_tx = master->dma_rx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) static void atmel_spi_stop_dma(struct spi_master *master)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) if (master->dma_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) dmaengine_terminate_all(master->dma_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) if (master->dma_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) dmaengine_terminate_all(master->dma_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) static void atmel_spi_release_dma(struct spi_master *master)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) if (master->dma_rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) dma_release_channel(master->dma_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) master->dma_rx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) if (master->dma_tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) dma_release_channel(master->dma_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) master->dma_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) /* This function is called by the DMA driver from tasklet context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) static void dma_callback(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) struct spi_master *master = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) struct atmel_spi *as = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) if (is_vmalloc_addr(as->current_transfer->rx_buf) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) memcpy(as->current_transfer->rx_buf, as->addr_rx_bbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) as->current_transfer->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) complete(&as->xfer_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) * Next transfer using PIO without FIFO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) */
static void atmel_spi_next_xfer_single(struct spi_master *master,
				       struct spi_transfer *xfer)
{
	struct atmel_spi *as = spi_master_get_devdata(master);
	/* Byte offset of the next word to send within the transfer. */
	unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;

	dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_pio\n");

	/* Make sure data is not remaining in RDR */
	spi_readl(as, RDR);
	while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
		spi_readl(as, RDR);
		cpu_relax();
	}

	/* Write exactly one word; 9..16 bit words occupy two bytes. */
	if (xfer->bits_per_word > 8)
		spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos));
	else
		spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos));

	dev_dbg(master->dev.parent,
		"  start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
		xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
		xfer->bits_per_word);

	/*
	 * Enable relevant interrupts: RDRF signals the received word
	 * (i.e. word completion), OVRES signals a receive overrun.
	 */
	spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) * Next transfer using PIO with FIFO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) */
static void atmel_spi_next_xfer_fifo(struct spi_master *master,
				     struct spi_transfer *xfer)
{
	struct atmel_spi *as = spi_master_get_devdata(master);
	u32 current_remaining_data, num_data;
	/* Byte offset of the next chunk within the transfer buffer. */
	u32 offset = xfer->len - as->current_remaining_bytes;
	/* Two views of the same position: 16-bit and 8-bit word streams. */
	const u16 *words = (const u16 *)((u8 *)xfer->tx_buf + offset);
	const u8  *bytes = (const u8 *)((u8 *)xfer->tx_buf + offset);
	u16 td0, td1;
	u32 fifomr;

	dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_fifo\n");

	/*
	 * Compute the number of data to transfer in the current iteration:
	 * remaining bytes, expressed in words (halved for >8-bit words),
	 * capped by the FIFO depth.
	 */
	current_remaining_data = ((xfer->bits_per_word > 8) ?
				  ((u32)as->current_remaining_bytes >> 1) :
				  (u32)as->current_remaining_bytes);
	num_data = min(current_remaining_data, as->fifo_size);

	/* Flush RX and TX FIFOs */
	spi_writel(as, CR, SPI_BIT(RXFCLR) | SPI_BIT(TXFCLR));
	while (spi_readl(as, FLR))
		cpu_relax();

	/* Set RX FIFO Threshold to the number of data to transfer */
	fifomr = spi_readl(as, FMR);
	spi_writel(as, FMR, SPI_BFINS(RXFTHRES, num_data, fifomr));

	/* Clear FIFO flags in the Status Register, especially RXFTHF */
	(void)spi_readl(as, SR);

	/*
	 * Fill TX FIFO: the controller accepts two data per 32-bit TDR
	 * write (first data in the low 16 bits, second in the high 16),
	 * so pack them in pairs first.
	 */
	while (num_data >= 2) {
		if (xfer->bits_per_word > 8) {
			td0 = *words++;
			td1 = *words++;
		} else {
			td0 = *bytes++;
			td1 = *bytes++;
		}

		spi_writel(as, TDR, (td1 << 16) | td0);
		num_data -= 2;
	}

	/* Odd data count: push the last word alone via a 16-bit write. */
	if (num_data) {
		if (xfer->bits_per_word > 8)
			td0 = *words++;
		else
			td0 = *bytes++;

		spi_writew(as, TDR, td0);
		num_data--;
	}

	dev_dbg(master->dev.parent,
		"  start fifo xfer %p: len %u tx %p rx %p bitpw %d\n",
		xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
		xfer->bits_per_word);

	/*
	 * Enable RX FIFO Threshold Flag interrupt to be notified about
	 * transfer completion.
	 */
	spi_writel(as, IER, SPI_BIT(RXFTHF) | SPI_BIT(OVRES));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) * Next transfer using PIO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) static void atmel_spi_next_xfer_pio(struct spi_master *master,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) struct atmel_spi *as = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) if (as->fifo_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) atmel_spi_next_xfer_fifo(master, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) atmel_spi_next_xfer_single(master, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) * Submit next transfer for DMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) */
/*
 * Submit next transfer for DMA.
 *
 * Called with as->lock held; the lock is dropped around the (potentially
 * sleeping) dmaengine calls and re-taken before returning on every path.
 * On success *plen is set to the number of bytes submitted (xfer->len).
 * Returns 0 on success, -ENODEV if a channel is missing, -ENOMEM on any
 * descriptor/submit failure.
 */
static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
				struct spi_transfer *xfer,
				u32 *plen)
	__must_hold(&as->lock)
{
	struct atmel_spi *as = spi_master_get_devdata(master);
	struct dma_chan *rxchan = master->dma_rx;
	struct dma_chan *txchan = master->dma_tx;
	struct dma_async_tx_descriptor *rxdesc;
	struct dma_async_tx_descriptor *txdesc;
	struct dma_slave_config slave_config;
	dma_cookie_t cookie;

	dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_dma_submit\n");

	/* Check that the channels are available */
	if (!rxchan || !txchan)
		return -ENODEV;

	/* release lock for DMA operations */
	atmel_spi_unlock(as);

	*plen = xfer->len;

	/* Reprogram channel bus widths for this transfer's word size. */
	if (atmel_spi_dma_slave_config(as, &slave_config,
				       xfer->bits_per_word))
		goto err_exit;

	/*
	 * Send both scatterlists.  On SAM v4/v5, vmalloc'ed buffers go
	 * through contiguous bounce buffers instead of the scatterlists.
	 */
	if (atmel_spi_is_vmalloc_xfer(xfer) &&
	    IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
		rxdesc = dmaengine_prep_slave_single(rxchan,
						     as->dma_addr_rx_bbuf,
						     xfer->len,
						     DMA_DEV_TO_MEM,
						     DMA_PREP_INTERRUPT |
						     DMA_CTRL_ACK);
	} else {
		rxdesc = dmaengine_prep_slave_sg(rxchan,
						 xfer->rx_sg.sgl,
						 xfer->rx_sg.nents,
						 DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT |
						 DMA_CTRL_ACK);
	}
	if (!rxdesc)
		goto err_dma;

	if (atmel_spi_is_vmalloc_xfer(xfer) &&
	    IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
		/* TX bounce buffer must be pre-filled with the payload. */
		memcpy(as->addr_tx_bbuf, xfer->tx_buf, xfer->len);
		txdesc = dmaengine_prep_slave_single(txchan,
						     as->dma_addr_tx_bbuf,
						     xfer->len, DMA_MEM_TO_DEV,
						     DMA_PREP_INTERRUPT |
						     DMA_CTRL_ACK);
	} else {
		txdesc = dmaengine_prep_slave_sg(txchan,
						 xfer->tx_sg.sgl,
						 xfer->tx_sg.nents,
						 DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT |
						 DMA_CTRL_ACK);
	}
	if (!txdesc)
		goto err_dma;

	dev_dbg(master->dev.parent,
		"  start dma xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
		xfer, xfer->len, xfer->tx_buf, (unsigned long long)xfer->tx_dma,
		xfer->rx_buf, (unsigned long long)xfer->rx_dma);

	/* Enable relevant interrupts */
	spi_writel(as, IER, SPI_BIT(OVRES));

	/* Put the callback on the RX transfer only, that should finish last */
	rxdesc->callback = dma_callback;
	rxdesc->callback_param = master;

	/* Submit and fire RX and TX with TX last so we're ready to read! */
	cookie = rxdesc->tx_submit(rxdesc);
	if (dma_submit_error(cookie))
		goto err_dma;
	cookie = txdesc->tx_submit(txdesc);
	if (dma_submit_error(cookie))
		goto err_dma;
	rxchan->device->device_issue_pending(rxchan);
	txchan->device->device_issue_pending(txchan);

	/* take back lock */
	atmel_spi_lock(as);
	return 0;

err_dma:
	/* Undo the IER write and kill anything already in flight. */
	spi_writel(as, IDR, SPI_BIT(OVRES));
	atmel_spi_stop_dma(master);
err_exit:
	atmel_spi_lock(as);
	return -ENOMEM;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) static void atmel_spi_next_xfer_data(struct spi_master *master,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) struct spi_transfer *xfer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) dma_addr_t *tx_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) dma_addr_t *rx_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) u32 *plen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) *rx_dma = xfer->rx_dma + xfer->len - *plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) *tx_dma = xfer->tx_dma + xfer->len - *plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) if (*plen > master->max_dma_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) *plen = master->max_dma_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) struct spi_device *spi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) u32 scbr, csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) unsigned long bus_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) int chip_select;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (spi->cs_gpiod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) chip_select = as->native_cs_for_gpio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) chip_select = spi->chip_select;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) /* v1 chips start out at half the peripheral bus speed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) bus_hz = as->spi_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (!atmel_spi_is_v2(as))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) bus_hz /= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * Calculate the lowest divider that satisfies the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * constraint, assuming div32/fdiv/mbz == 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) scbr = DIV_ROUND_UP(bus_hz, xfer->speed_hz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * If the resulting divider doesn't fit into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * register bitfield, we can't satisfy the constraint.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (scbr >= (1 << SPI_SCBR_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) dev_err(&spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) "setup: %d Hz too slow, scbr %u; min %ld Hz\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) xfer->speed_hz, scbr, bus_hz/255);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (scbr == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) dev_err(&spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) "setup: %d Hz too high, scbr %u; max %ld Hz\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) xfer->speed_hz, scbr, bus_hz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) csr = spi_readl(as, CSR0 + 4 * chip_select);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) csr = SPI_BFINS(SCBR, scbr, csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) spi_writel(as, CSR0 + 4 * chip_select, csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) xfer->effective_speed_hz = bus_hz / scbr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * Submit next transfer for PDC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * lock is held, spi irq is blocked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) */
/*
 * Submit next transfer for PDC.
 * lock is held, spi irq is blocked
 *
 * Programs up to two chunks: the current one into RPR/TPR + RCR/TCR and,
 * if bytes remain, the following one into the PDC "next" registers
 * (RNPR/TNPR + RNCR/TNCR) so the hardware chains them automatically.
 */
static void atmel_spi_pdc_next_xfer(struct spi_master *master,
				    struct spi_message *msg,
				    struct spi_transfer *xfer)
{
	struct atmel_spi *as = spi_master_get_devdata(master);
	u32 len;
	dma_addr_t tx_dma, rx_dma;

	/* Disable both PDC transfers while reprogramming the pointers. */
	spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));

	len = as->current_remaining_bytes;
	atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
	as->current_remaining_bytes -= len;

	spi_writel(as, RPR, rx_dma);
	spi_writel(as, TPR, tx_dma);

	/* PDC counters are in words, not bytes, for >8-bit words. */
	if (msg->spi->bits_per_word > 8)
		len >>= 1;
	spi_writel(as, RCR, len);
	spi_writel(as, TCR, len);

	dev_dbg(&msg->spi->dev,
		"  start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
		xfer, xfer->len, xfer->tx_buf,
		(unsigned long long)xfer->tx_dma, xfer->rx_buf,
		(unsigned long long)xfer->rx_dma);

	/* Queue the second chunk (if any) in the PDC "next" registers. */
	if (as->current_remaining_bytes) {
		len = as->current_remaining_bytes;
		atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
		as->current_remaining_bytes -= len;

		spi_writel(as, RNPR, rx_dma);
		spi_writel(as, TNPR, tx_dma);

		if (msg->spi->bits_per_word > 8)
			len >>= 1;
		spi_writel(as, RNCR, len);
		spi_writel(as, TNCR, len);

		dev_dbg(&msg->spi->dev,
			"  next xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
			xfer, xfer->len, xfer->tx_buf,
			(unsigned long long)xfer->tx_dma, xfer->rx_buf,
			(unsigned long long)xfer->rx_dma);
	}

	/* REVISIT: We're waiting for RXBUFF before we start the next
	 * transfer because we need to handle some difficult timing
	 * issues otherwise. If we wait for TXBUFE in one transfer and
	 * then starts waiting for RXBUFF in the next, it's difficult
	 * to tell the difference between the RXBUFF interrupt we're
	 * actually waiting for and the RXBUFF interrupt of the
	 * previous transfer.
	 *
	 * It should be doable, though. Just not now...
	 */
	spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES));
	spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * - The buffer is either valid for CPU access, else NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * - If the buffer is valid, so is its DMA address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * This driver manages the dma address unless message->is_dma_mapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) struct device *dev = &as->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (xfer->tx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) /* tx_buf is a const void* where we need a void * for the dma
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) void *nonconst_tx = (void *)xfer->tx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) xfer->tx_dma = dma_map_single(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) nonconst_tx, xfer->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (dma_mapping_error(dev, xfer->tx_dma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (xfer->rx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) xfer->rx_dma = dma_map_single(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) xfer->rx_buf, xfer->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (dma_mapping_error(dev, xfer->rx_dma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (xfer->tx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) dma_unmap_single(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) xfer->tx_dma, xfer->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (xfer->tx_dma != INVALID_DMA_ADDRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) dma_unmap_single(master->dev.parent, xfer->tx_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) xfer->len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (xfer->rx_dma != INVALID_DMA_ADDRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) dma_unmap_single(master->dev.parent, xfer->rx_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) xfer->len, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) static void atmel_spi_disable_pdc_transfer(struct atmel_spi *as)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) atmel_spi_pump_single_data(struct atmel_spi *as, struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) u8 *rxp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) u16 *rxp16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (xfer->bits_per_word > 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) *rxp16 = spi_readl(as, RDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) *rxp = spi_readl(as, RDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (xfer->bits_per_word > 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (as->current_remaining_bytes > 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) as->current_remaining_bytes -= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) as->current_remaining_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) as->current_remaining_bytes--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) atmel_spi_pump_fifo_data(struct atmel_spi *as, struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) u32 fifolr = spi_readl(as, FLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) u32 num_bytes, num_data = SPI_BFEXT(RXFL, fifolr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) u32 offset = xfer->len - as->current_remaining_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) u16 *words = (u16 *)((u8 *)xfer->rx_buf + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) u8 *bytes = (u8 *)((u8 *)xfer->rx_buf + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) u16 rd; /* RD field is the lowest 16 bits of RDR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) /* Update the number of remaining bytes to transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) num_bytes = ((xfer->bits_per_word > 8) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) (num_data << 1) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) num_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (as->current_remaining_bytes > num_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) as->current_remaining_bytes -= num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) as->current_remaining_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) /* Handle odd number of bytes when data are more than 8bit width */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (xfer->bits_per_word > 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) as->current_remaining_bytes &= ~0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) /* Read data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) while (num_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) rd = spi_readl(as, RDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (xfer->bits_per_word > 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) *words++ = rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) *bytes++ = rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) num_data--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) /* Called from IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * Must update "current_remaining_bytes" to keep track of data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) * to transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if (as->fifo_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) atmel_spi_pump_fifo_data(as, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) atmel_spi_pump_single_data(as, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) /* Interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * No need for locking in this Interrupt handler: done_status is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) * only information modified.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) static irqreturn_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) atmel_spi_pio_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) struct spi_master *master = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) struct atmel_spi *as = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) u32 status, pending, imr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) struct spi_transfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) int ret = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) imr = spi_readl(as, IMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) status = spi_readl(as, SR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) pending = status & imr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (pending & SPI_BIT(OVRES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) ret = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) spi_writel(as, IDR, SPI_BIT(OVRES));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) dev_warn(master->dev.parent, "overrun\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * When we get an overrun, we disregard the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) * transfer. Data will not be copied back from any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) * bounce buffer and msg->actual_len will not be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) * updated with the last xfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * We will also not process any remaning transfers in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) * the message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) as->done_status = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) /* Clear any overrun happening while cleaning up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) spi_readl(as, SR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) complete(&as->xfer_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) } else if (pending & (SPI_BIT(RDRF) | SPI_BIT(RXFTHF))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) atmel_spi_lock(as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (as->current_remaining_bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) ret = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) xfer = as->current_transfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) atmel_spi_pump_pio_data(as, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (!as->current_remaining_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) spi_writel(as, IDR, pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) complete(&as->xfer_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) atmel_spi_unlock(as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) WARN_ONCE(pending, "IRQ not handled, pending = %x\n", pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) ret = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) spi_writel(as, IDR, pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) static irqreturn_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) atmel_spi_pdc_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) struct spi_master *master = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) struct atmel_spi *as = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) u32 status, pending, imr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) int ret = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) imr = spi_readl(as, IMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) status = spi_readl(as, SR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) pending = status & imr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (pending & SPI_BIT(OVRES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) ret = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) | SPI_BIT(OVRES)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) /* Clear any overrun happening while cleaning up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) spi_readl(as, SR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) as->done_status = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) complete(&as->xfer_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) } else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) ret = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) spi_writel(as, IDR, pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) complete(&as->xfer_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) static int atmel_word_delay_csr(struct spi_device *spi, struct atmel_spi *as)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) struct spi_delay *delay = &spi->word_delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) u32 value = delay->value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) switch (delay->unit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) case SPI_DELAY_UNIT_NSECS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) value /= 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) case SPI_DELAY_UNIT_USECS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) return (as->spi_clk / 1000000 * value) >> 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) static void initialize_native_cs_for_gpio(struct atmel_spi *as)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) struct spi_master *master = platform_get_drvdata(as->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (!as->native_cs_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) return; /* already initialized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (!master->cs_gpiods)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) return; /* No CS GPIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * On the first version of the controller (AT91RM9200), CS0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * can't be used associated with GPIO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (atmel_spi_is_v2(as))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) i = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) for (; i < 4; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (master->cs_gpiods[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) as->native_cs_free |= BIT(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (as->native_cs_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) as->native_cs_for_gpio = ffs(as->native_cs_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) static int atmel_spi_setup(struct spi_device *spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) struct atmel_spi *as;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) struct atmel_spi_device *asd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) u32 csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) unsigned int bits = spi->bits_per_word;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) int chip_select;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) int word_delay_csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) as = spi_master_get_devdata(spi->master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) /* see notes above re chipselect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (!spi->cs_gpiod && (spi->mode & SPI_CS_HIGH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) dev_warn(&spi->dev, "setup: non GPIO CS can't be active-high\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) /* Setup() is called during spi_register_controller(aka
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * spi_register_master) but after all membmers of the cs_gpiod
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * array have been filled, so we can looked for which native
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * CS will be free for using with GPIO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) initialize_native_cs_for_gpio(as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (spi->cs_gpiod && as->native_cs_free) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) dev_err(&spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) "No native CS available to support this GPIO CS\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (spi->cs_gpiod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) chip_select = as->native_cs_for_gpio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) chip_select = spi->chip_select;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) csr = SPI_BF(BITS, bits - 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if (spi->mode & SPI_CPOL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) csr |= SPI_BIT(CPOL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (!(spi->mode & SPI_CPHA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) csr |= SPI_BIT(NCPHA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (!spi->cs_gpiod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) csr |= SPI_BIT(CSAAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) csr |= SPI_BF(DLYBS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) word_delay_csr = atmel_word_delay_csr(spi, as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) if (word_delay_csr < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) return word_delay_csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) /* DLYBCT adds delays between words. This is useful for slow devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) * that need a bit of time to setup the next transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) csr |= SPI_BF(DLYBCT, word_delay_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) asd = spi->controller_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (!asd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) asd = kzalloc(sizeof(struct atmel_spi_device), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (!asd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) spi->controller_state = asd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) asd->csr = csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) dev_dbg(&spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) "setup: bpw %u mode 0x%x -> csr%d %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) bits, spi->mode, spi->chip_select, csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) if (!atmel_spi_is_v2(as))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) spi_writel(as, CSR0 + 4 * chip_select, csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) static int atmel_spi_one_transfer(struct spi_master *master,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) struct spi_message *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) struct atmel_spi *as;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) struct spi_device *spi = msg->spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) u8 bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) u32 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) struct atmel_spi_device *asd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) int timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) unsigned long dma_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) as = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) dev_dbg(&spi->dev, "missing rx or tx buf\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) asd = spi->controller_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) bits = (asd->csr >> 4) & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (bits != xfer->bits_per_word - 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) dev_dbg(&spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) "you can't yet change bits_per_word in transfers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) return -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * DMA map early, for performance (empties dcache ASAP) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) * better fault reporting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if ((!msg->is_dma_mapped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) && as->use_pdc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) if (atmel_spi_dma_map_xfer(as, xfer) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) atmel_spi_set_xfer_speed(as, msg->spi, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) as->done_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) as->current_transfer = xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) as->current_remaining_bytes = xfer->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) while (as->current_remaining_bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) reinit_completion(&as->xfer_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (as->use_pdc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) atmel_spi_pdc_next_xfer(master, msg, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) } else if (atmel_spi_use_dma(as, xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) len = as->current_remaining_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) ret = atmel_spi_next_xfer_dma_submit(master,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) xfer, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) dev_err(&spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) "unable to use DMA, fallback to PIO\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) atmel_spi_next_xfer_pio(master, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) as->current_remaining_bytes -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (as->current_remaining_bytes < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) as->current_remaining_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) atmel_spi_next_xfer_pio(master, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) /* interrupts are disabled, so free the lock for schedule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) atmel_spi_unlock(as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) dma_timeout = wait_for_completion_timeout(&as->xfer_completion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) SPI_DMA_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) atmel_spi_lock(as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (WARN_ON(dma_timeout == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) dev_err(&spi->dev, "spi transfer timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) as->done_status = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) if (as->done_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (as->done_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) if (as->use_pdc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) dev_warn(master->dev.parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) "overrun (%u/%u remaining)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) spi_readl(as, TCR), spi_readl(as, RCR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) * Clean up DMA registers and make sure the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) * registers are empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) spi_writel(as, RNCR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) spi_writel(as, TNCR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) spi_writel(as, RCR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) spi_writel(as, TCR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) for (timeout = 1000; timeout; timeout--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) if (!timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) dev_warn(master->dev.parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) "timeout waiting for TXEMPTY");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) while (spi_readl(as, SR) & SPI_BIT(RDRF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) spi_readl(as, RDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) /* Clear any overrun happening while cleaning up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) spi_readl(as, SR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) } else if (atmel_spi_use_dma(as, xfer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) atmel_spi_stop_dma(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) if (!msg->is_dma_mapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) && as->use_pdc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) atmel_spi_dma_unmap_xfer(master, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) /* only update length if no error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) msg->actual_length += xfer->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (!msg->is_dma_mapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) && as->use_pdc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) atmel_spi_dma_unmap_xfer(master, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) spi_transfer_delay_exec(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (xfer->cs_change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) if (list_is_last(&xfer->transfer_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) &msg->transfers)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) as->keep_cs = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) cs_deactivate(as, msg->spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) cs_activate(as, msg->spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) static int atmel_spi_transfer_one_message(struct spi_master *master,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) struct spi_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) struct atmel_spi *as;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) struct spi_transfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) struct spi_device *spi = msg->spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) as = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) dev_dbg(&spi->dev, "new message %p submitted for %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) msg, dev_name(&spi->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) atmel_spi_lock(as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) cs_activate(as, spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) as->keep_cs = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) msg->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) msg->actual_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) list_for_each_entry(xfer, &msg->transfers, transfer_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) trace_spi_transfer_start(msg, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) ret = atmel_spi_one_transfer(master, msg, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) goto msg_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) trace_spi_transfer_stop(msg, xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (as->use_pdc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) atmel_spi_disable_pdc_transfer(as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) list_for_each_entry(xfer, &msg->transfers, transfer_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) dev_dbg(&spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) " xfer %p: len %u tx %p/%pad rx %p/%pad\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) xfer, xfer->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) xfer->tx_buf, &xfer->tx_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) xfer->rx_buf, &xfer->rx_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) msg_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) if (!as->keep_cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) cs_deactivate(as, msg->spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) atmel_spi_unlock(as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) msg->status = as->done_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) spi_finalize_current_message(spi->master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) static void atmel_spi_cleanup(struct spi_device *spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) struct atmel_spi_device *asd = spi->controller_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) if (!asd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) spi->controller_state = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) kfree(asd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) static inline unsigned int atmel_get_version(struct atmel_spi *as)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) return spi_readl(as, VERSION) & 0x00000fff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) static void atmel_get_caps(struct atmel_spi *as)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) unsigned int version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) version = atmel_get_version(as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) as->caps.is_spi2 = version > 0x121;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) as->caps.has_wdrbt = version >= 0x210;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) as->caps.has_dma_support = version >= 0x212;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) as->caps.has_pdc_support = version < 0x212;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) static void atmel_spi_init(struct atmel_spi *as)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) spi_writel(as, CR, SPI_BIT(SWRST));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) /* It is recommended to enable FIFOs first thing after reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) if (as->fifo_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) spi_writel(as, CR, SPI_BIT(FIFOEN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (as->caps.has_wdrbt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) spi_writel(as, MR, SPI_BIT(WDRBT) | SPI_BIT(MODFDIS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) | SPI_BIT(MSTR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if (as->use_pdc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) spi_writel(as, CR, SPI_BIT(SPIEN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) static int atmel_spi_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) struct resource *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) struct clk *clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) struct spi_master *master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) struct atmel_spi *as;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) /* Select default pin state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) pinctrl_pm_select_default_state(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (!regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) if (irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) clk = devm_clk_get(&pdev->dev, "spi_clk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (IS_ERR(clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) return PTR_ERR(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) /* setup spi core then atmel-specific driver state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) master = spi_alloc_master(&pdev->dev, sizeof(*as));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (!master)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) /* the spi->mode bits understood by this driver: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) master->use_gpio_descriptors = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) master->dev.of_node = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) master->bus_num = pdev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) master->num_chipselect = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) master->setup = atmel_spi_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) master->transfer_one_message = atmel_spi_transfer_one_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) master->cleanup = atmel_spi_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) master->auto_runtime_pm = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) master->max_dma_len = SPI_MAX_DMA_XFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) master->can_dma = atmel_spi_can_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) platform_set_drvdata(pdev, master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) as = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) spin_lock_init(&as->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) as->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) as->regs = devm_ioremap_resource(&pdev->dev, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) if (IS_ERR(as->regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) ret = PTR_ERR(as->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) goto out_unmap_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) as->phybase = regs->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) as->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) as->clk = clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) init_completion(&as->xfer_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) atmel_get_caps(as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) as->use_dma = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) as->use_pdc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) if (as->caps.has_dma_support) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) ret = atmel_spi_configure_dma(master, as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) as->use_dma = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) } else if (ret == -EPROBE_DEFER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) goto out_unmap_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) } else if (as->caps.has_pdc_support) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) as->use_pdc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) as->addr_rx_bbuf = dma_alloc_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) SPI_MAX_DMA_XFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) &as->dma_addr_rx_bbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) if (!as->addr_rx_bbuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) as->use_dma = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) as->addr_tx_bbuf = dma_alloc_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) SPI_MAX_DMA_XFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) &as->dma_addr_tx_bbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) if (!as->addr_tx_bbuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) as->use_dma = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) as->addr_rx_bbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) as->dma_addr_rx_bbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) if (!as->use_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) dev_info(master->dev.parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) " can not allocate dma coherent memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) if (as->caps.has_dma_support && !as->use_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) if (as->use_pdc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pdc_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 0, dev_name(&pdev->dev), master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pio_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 0, dev_name(&pdev->dev), master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) goto out_unmap_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) /* Initialize the hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) ret = clk_prepare_enable(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) goto out_free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) as->spi_clk = clk_get_rate(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) as->fifo_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) &as->fifo_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) dev_info(&pdev->dev, "Using FIFO (%u data)\n", as->fifo_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) atmel_spi_init(as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) pm_runtime_use_autosuspend(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) pm_runtime_set_active(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) pm_runtime_enable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) ret = devm_spi_register_master(&pdev->dev, master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) goto out_free_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) /* go! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) dev_info(&pdev->dev, "Atmel SPI Controller version 0x%x at 0x%08lx (irq %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) atmel_get_version(as), (unsigned long)regs->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) out_free_dma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) pm_runtime_set_suspended(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) if (as->use_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) atmel_spi_release_dma(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) spi_writel(as, CR, SPI_BIT(SWRST));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) clk_disable_unprepare(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) out_free_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) out_unmap_regs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) spi_master_put(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) static int atmel_spi_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) struct spi_master *master = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) struct atmel_spi *as = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) pm_runtime_get_sync(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) /* reset the hardware and block queue progress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) if (as->use_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) atmel_spi_stop_dma(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) atmel_spi_release_dma(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) as->addr_tx_bbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) as->dma_addr_tx_bbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) as->addr_rx_bbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) as->dma_addr_rx_bbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) spin_lock_irq(&as->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) spi_writel(as, CR, SPI_BIT(SWRST));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) spi_readl(as, SR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) spin_unlock_irq(&as->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) clk_disable_unprepare(as->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) pm_runtime_put_noidle(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) static int atmel_spi_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) struct spi_master *master = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) struct atmel_spi *as = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) clk_disable_unprepare(as->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) pinctrl_pm_select_sleep_state(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) static int atmel_spi_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) struct spi_master *master = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) struct atmel_spi *as = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) pinctrl_pm_select_default_state(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) return clk_prepare_enable(as->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) static int atmel_spi_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) struct spi_master *master = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) /* Stop the queue running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) ret = spi_master_suspend(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (!pm_runtime_suspended(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) atmel_spi_runtime_suspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) static int atmel_spi_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) struct spi_master *master = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) struct atmel_spi *as = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) ret = clk_prepare_enable(as->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) atmel_spi_init(as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) clk_disable_unprepare(as->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) if (!pm_runtime_suspended(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) ret = atmel_spi_runtime_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) /* Start the queue running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) return spi_master_resume(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) static const struct dev_pm_ops atmel_spi_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) SET_SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) SET_RUNTIME_PM_OPS(atmel_spi_runtime_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) atmel_spi_runtime_resume, NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) #define ATMEL_SPI_PM_OPS (&atmel_spi_pm_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) #define ATMEL_SPI_PM_OPS NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) static const struct of_device_id atmel_spi_dt_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) { .compatible = "atmel,at91rm9200-spi" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) { /* sentinel */ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) MODULE_DEVICE_TABLE(of, atmel_spi_dt_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) static struct platform_driver atmel_spi_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) .name = "atmel_spi",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) .pm = ATMEL_SPI_PM_OPS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) .of_match_table = atmel_spi_dt_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) .probe = atmel_spi_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) .remove = atmel_spi_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) module_platform_driver(atmel_spi_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) MODULE_DESCRIPTION("Atmel AT32/AT91 SPI Controller driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) MODULE_ALIAS("platform:atmel_spi");