// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
 * Author: Addy Ke <addy.ke@rock-chips.com>
 */

#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>

#define DRIVER_NAME "rockchip-spi"

#define ROCKCHIP_SPI_CLR_BITS(reg, bits) \
        writel_relaxed(readl_relaxed(reg) & ~(bits), reg)
#define ROCKCHIP_SPI_SET_BITS(reg, bits) \
        writel_relaxed(readl_relaxed(reg) | (bits), reg)

/* SPI register offsets */
#define ROCKCHIP_SPI_CTRLR0     0x0000
#define ROCKCHIP_SPI_CTRLR1     0x0004
#define ROCKCHIP_SPI_SSIENR     0x0008
#define ROCKCHIP_SPI_SER        0x000c
#define ROCKCHIP_SPI_BAUDR      0x0010
#define ROCKCHIP_SPI_TXFTLR     0x0014
#define ROCKCHIP_SPI_RXFTLR     0x0018
#define ROCKCHIP_SPI_TXFLR      0x001c
#define ROCKCHIP_SPI_RXFLR      0x0020
#define ROCKCHIP_SPI_SR         0x0024
#define ROCKCHIP_SPI_IPR        0x0028
#define ROCKCHIP_SPI_IMR        0x002c
#define ROCKCHIP_SPI_ISR        0x0030
#define ROCKCHIP_SPI_RISR       0x0034
#define ROCKCHIP_SPI_ICR        0x0038
#define ROCKCHIP_SPI_DMACR      0x003c
#define ROCKCHIP_SPI_DMATDLR    0x0040
#define ROCKCHIP_SPI_DMARDLR    0x0044
#define ROCKCHIP_SPI_VERSION    0x0048
#define ROCKCHIP_SPI_TXDR       0x0400
#define ROCKCHIP_SPI_RXDR       0x0800

/* Bit fields in CTRLR0 */
#define CR0_DFS_OFFSET          0
#define CR0_DFS_4BIT            0x0
#define CR0_DFS_8BIT            0x1
#define CR0_DFS_16BIT           0x2

#define CR0_CFS_OFFSET          2

#define CR0_SCPH_OFFSET         6

#define CR0_SCPOL_OFFSET        7

#define CR0_CSM_OFFSET          8
#define CR0_CSM_KEEP            0x0
/* ss_n stays high for half an sclk_out cycle */
#define CR0_CSM_HALF            0x1
/* ss_n stays high for one sclk_out cycle */
#define CR0_CSM_ONE             0x2

/* ss_n to sclk_out delay */
#define CR0_SSD_OFFSET          10
/*
 * The period between ss_n going active and
 * sclk_out going active is half an sclk_out cycle
 */
#define CR0_SSD_HALF            0x0
/*
 * The period between ss_n going active and
 * sclk_out going active is one sclk_out cycle
 */
#define CR0_SSD_ONE             0x1

#define CR0_EM_OFFSET           11
#define CR0_EM_LITTLE           0x0
#define CR0_EM_BIG              0x1

#define CR0_FBM_OFFSET          12
#define CR0_FBM_MSB             0x0
#define CR0_FBM_LSB             0x1

#define CR0_BHT_OFFSET          13
#define CR0_BHT_16BIT           0x0
#define CR0_BHT_8BIT            0x1

#define CR0_RSD_OFFSET          14
#define CR0_RSD_MAX             0x3

#define CR0_FRF_OFFSET          16
#define CR0_FRF_SPI             0x0
#define CR0_FRF_SSP             0x1
#define CR0_FRF_MICROWIRE       0x2

#define CR0_XFM_OFFSET          18
#define CR0_XFM_MASK            (0x03 << CR0_XFM_OFFSET)
#define CR0_XFM_TR              0x0
#define CR0_XFM_TO              0x1
#define CR0_XFM_RO              0x2

#define CR0_OPM_OFFSET          20
#define CR0_OPM_MASTER          0x0
#define CR0_OPM_SLAVE           0x1

#define CR0_SOI_OFFSET          23

#define CR0_MTM_OFFSET          0x21

/* Bit fields in SER, 2bit */
#define SER_MASK                0x3

/* Bit fields in BAUDR */
#define BAUDR_SCKDV_MIN         2
#define BAUDR_SCKDV_MAX         65534

/* Bit fields in SR, 6bit */
#define SR_MASK                 0x3f
#define SR_BUSY                 (1 << 0)
#define SR_TF_FULL              (1 << 1)
#define SR_TF_EMPTY             (1 << 2)
#define SR_RF_EMPTY             (1 << 3)
#define SR_RF_FULL              (1 << 4)
#define SR_SLAVE_TX_BUSY        (1 << 5)

/* Bit fields in IPR, IMR, ISR, RISR, 5bit */
#define INT_MASK                0x1f
#define INT_TF_EMPTY            (1 << 0)
#define INT_TF_OVERFLOW         (1 << 1)
#define INT_RF_UNDERFLOW        (1 << 2)
#define INT_RF_OVERFLOW         (1 << 3)
#define INT_RF_FULL             (1 << 4)
#define INT_CS_INACTIVE         (1 << 6)

/* Bit fields in ICR, 4bit */
#define ICR_MASK                0x0f
#define ICR_ALL                 (1 << 0)
#define ICR_RF_UNDERFLOW        (1 << 1)
#define ICR_RF_OVERFLOW         (1 << 2)
#define ICR_TF_OVERFLOW         (1 << 3)

/* Bit fields in DMACR */
#define RF_DMA_EN               (1 << 0)
#define TF_DMA_EN               (1 << 1)

/* Driver state flags */
#define RXDMA                   (1 << 0)
#define TXDMA                   (1 << 1)

/* sclk_out: the SPI master logic in rk3x supports up to 50 MHz */
#define MAX_SCLK_OUT            50000000U
/* max sclk_out with an IO driver strength of 4 mA */
#define IO_DRIVER_4MA_MAX_SCLK_OUT  24000000U

/*
 * SPI_CTRLR1 is 16-bits, so we should support lengths of 0xffff + 1. However,
 * the controller seems to hang when given 0x10000, so stick with this for now.
 */
#define ROCKCHIP_SPI_MAX_TRANLEN    0xffff

/* 2 for native cs, 2 for cs-gpio */
#define ROCKCHIP_SPI_MAX_CS_NUM     4
#define ROCKCHIP_SPI_VER2_TYPE1     0x05EC0002
#define ROCKCHIP_SPI_VER2_TYPE2     0x00110002

#define ROCKCHIP_SPI_REGISTER_SIZE  0x1000

struct rockchip_spi_quirks {
        u32 max_baud_div_in_cpha;
};

struct rockchip_spi {
        struct device *dev;

        struct clk *spiclk;
        struct clk *apb_pclk;
        struct clk *sclk_in;

        void __iomem *regs;
        dma_addr_t dma_addr_rx;
        dma_addr_t dma_addr_tx;

        const void *tx;
        void *rx;
        unsigned int tx_left;
        unsigned int rx_left;

        atomic_t state;

        /* depth of the FIFO buffer */
        u32 fifo_len;
        /* frequency of spiclk */
        u32 freq;
        /* current transfer speed */
        u32 speed_hz;

        u8 n_bytes;
        u8 rsd;

        bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM];

        struct pinctrl_state *high_speed_state;
        bool slave_abort;
        bool cs_inactive; /* spi slave transmission stops when cs goes inactive */
        bool cs_high_supported; /* native CS supports active-high polarity */

        struct spi_transfer *xfer; /* Store xfer temporarily */
        phys_addr_t base_addr_phy;
        struct miscdevice miscdev;

        /* quirks */
        u32 max_baud_div_in_cpha;
};

static inline void spi_enable_chip(struct rockchip_spi *rs, bool enable)
{
        writel_relaxed((enable ? 1U : 0U), rs->regs + ROCKCHIP_SPI_SSIENR);
}

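/*
 * Poll the status register until the controller reports idle, giving up
 * after roughly 5 ms. In slave mode both the slave-TX-busy flag and the
 * generic busy flag must clear before the FIFO contents have really been
 * shifted out.
 */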
static inline void wait_for_tx_idle(struct rockchip_spi *rs, bool slave_mode)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(5);

        do {
                if (slave_mode) {
                        if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_SLAVE_TX_BUSY) &&
                            !((readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY)))
                                return;
                } else {
                        if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))
                                return;
                }
        } while (!time_after(jiffies, timeout));

        dev_warn(rs->dev, "spi controller is in busy state!\n");
}

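/*
 * Controllers that report one of the known v2 IDs in the VERSION register
 * have a 64-entry FIFO; anything else is treated as the older 32-entry
 * design.
 */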
static u32 get_fifo_len(struct rockchip_spi *rs)
{
        u32 ver;

        ver = readl_relaxed(rs->regs + ROCKCHIP_SPI_VERSION);

        switch (ver) {
        case ROCKCHIP_SPI_VER2_TYPE1:
        case ROCKCHIP_SPI_VER2_TYPE2:
                return 64;
        default:
                return 32;
        }
}

static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
{
        struct spi_controller *ctlr = spi->controller;
        struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
        bool cs_asserted = spi->mode & SPI_CS_HIGH ? enable : !enable;

        /* Return immediately for no-op */
        if (cs_asserted == rs->cs_asserted[spi->chip_select])
                return;

        if (cs_asserted) {
                /* Keep things powered as long as CS is asserted */
                pm_runtime_get_sync(rs->dev);

                if (spi->cs_gpiod)
                        ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER, 1);
                else
                        ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER, BIT(spi->chip_select));
        } else {
                if (spi->cs_gpiod)
                        ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER, 1);
                else
                        ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER, BIT(spi->chip_select));

                /* Drop reference from when we first asserted CS */
                pm_runtime_put(rs->dev);
        }

        rs->cs_asserted[spi->chip_select] = cs_asserted;
}

static void rockchip_spi_handle_err(struct spi_controller *ctlr,
                                    struct spi_message *msg)
{
        struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);

        /* stop running spi transfer
         * this also flushes both rx and tx fifos
         */
        spi_enable_chip(rs, false);

        /* make sure all interrupts are masked and status cleared */
        writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
        writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);

        if (atomic_read(&rs->state) & TXDMA)
                dmaengine_terminate_async(ctlr->dma_tx);

        if (atomic_read(&rs->state) & RXDMA)
                dmaengine_terminate_async(ctlr->dma_rx);
}

static void rockchip_spi_pio_writer(struct rockchip_spi *rs)
{
        u32 tx_free = rs->fifo_len - readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFLR);
        u32 words = min(rs->tx_left, tx_free);

        rs->tx_left -= words;
        for (; words; words--) {
                u32 txw;

                if (rs->n_bytes == 1)
                        txw = *(u8 *)rs->tx;
                else
                        txw = *(u16 *)rs->tx;

                writel_relaxed(txw, rs->regs + ROCKCHIP_SPI_TXDR);
                rs->tx += rs->n_bytes;
        }
}

static void rockchip_spi_pio_reader(struct rockchip_spi *rs)
{
        u32 words = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
        u32 rx_left = (rs->rx_left > words) ? rs->rx_left - words : 0;

        /* the hardware doesn't allow us to change fifo threshold
         * level while spi is enabled, so instead make sure to leave
         * enough words in the rx fifo to get the last interrupt
         * exactly when all words have been received
         */
        if (rx_left) {
                u32 ftl = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFTLR) + 1;

                if (rx_left < ftl) {
                        rx_left = ftl;
                        words = rs->rx_left - rx_left;
                }
        }

        rs->rx_left = rx_left;
        for (; words; words--) {
                u32 rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);

                if (!rs->rx)
                        continue;

                if (rs->n_bytes == 1)
                        *(u8 *)rs->rx = (u8)rxw;
                else
                        *(u16 *)rs->rx = (u16)rxw;
                rs->rx += rs->n_bytes;
        }
}

static irqreturn_t rockchip_spi_isr(int irq, void *dev_id)
{
        struct spi_controller *ctlr = dev_id;
        struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);

        /* When the cs_inactive interrupt fires, abort the slave transfer */
        if (rs->cs_inactive && readl_relaxed(rs->regs + ROCKCHIP_SPI_IMR) & INT_CS_INACTIVE) {
                ctlr->slave_abort(ctlr);
                writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
                writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);

                return IRQ_HANDLED;
        }

        if (rs->tx_left)
                rockchip_spi_pio_writer(rs);

        rockchip_spi_pio_reader(rs);
        if (!rs->rx_left) {
                spi_enable_chip(rs, false);
                writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
                writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
                spi_finalize_current_transfer(ctlr);
        }

        return IRQ_HANDLED;
}

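/*
 * Interrupt-driven PIO path: prime the TX FIFO here and let
 * rockchip_spi_isr() keep refilling/draining it until rx_left drops to
 * zero, at which point the transfer is finalized from interrupt context.
 */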
static int rockchip_spi_prepare_irq(struct rockchip_spi *rs,
                                    struct spi_controller *ctlr,
                                    struct spi_transfer *xfer)
{
        rs->tx = xfer->tx_buf;
        rs->rx = xfer->rx_buf;
        rs->tx_left = rs->tx ? xfer->len / rs->n_bytes : 0;
        rs->rx_left = xfer->len / rs->n_bytes;

        writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);

        spi_enable_chip(rs, true);

        if (rs->tx_left)
                rockchip_spi_pio_writer(rs);

        if (rs->cs_inactive)
                writel_relaxed(INT_RF_FULL | INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
        else
                writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR);

        /* 1 means the transfer is in progress */
        return 1;
}

static void rockchip_spi_dma_rxcb(void *data)
{
        struct spi_controller *ctlr = data;
        struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
        int state = atomic_fetch_andnot(RXDMA, &rs->state);

        if (state & TXDMA && !rs->slave_abort)
                return;

        if (rs->cs_inactive)
                writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);

        spi_enable_chip(rs, false);
        spi_finalize_current_transfer(ctlr);
}

static void rockchip_spi_dma_txcb(void *data)
{
        struct spi_controller *ctlr = data;
        struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
        int state = atomic_fetch_andnot(TXDMA, &rs->state);

        if (state & RXDMA && !rs->slave_abort)
                return;

        /* Wait until the FIFO has drained completely. */
        wait_for_tx_idle(rs, ctlr->slave);

        spi_enable_chip(rs, false);
        spi_finalize_current_transfer(ctlr);
}

static u32 rockchip_spi_calc_burst_size(u32 data_len)
{
        u32 i;

        /* burst size: 1, 2, 4, 8 */
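        /*
         * e.g. a 12-word transfer (0b1100) yields a burst of 4, while a
         * length with none of the low three bits set (16, 32, ...) falls
         * through to the maximum of 8.
         */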
        for (i = 1; i < 8; i <<= 1) {
                if (data_len & i)
                        break;
        }

        return i;
}

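/*
 * DMA path: the RX descriptor is submitted and issued before the controller
 * is enabled and before TX, so the receive channel is already listening when
 * data starts clocking in; the transfer is finalized from whichever DMA
 * callback runs last.
 */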
static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
                struct spi_controller *ctlr, struct spi_transfer *xfer)
{
        struct dma_async_tx_descriptor *rxdesc, *txdesc;

        atomic_set(&rs->state, 0);

        rs->tx = xfer->tx_buf;
        rs->rx = xfer->rx_buf;

        rxdesc = NULL;
        if (xfer->rx_buf) {
                struct dma_slave_config rxconf = {
                        .direction = DMA_DEV_TO_MEM,
                        .src_addr = rs->dma_addr_rx,
                        .src_addr_width = rs->n_bytes,
                        .src_maxburst = rockchip_spi_calc_burst_size(xfer->len / rs->n_bytes),
                };

                dmaengine_slave_config(ctlr->dma_rx, &rxconf);

                rxdesc = dmaengine_prep_slave_sg(
                                ctlr->dma_rx,
                                xfer->rx_sg.sgl, xfer->rx_sg.nents,
                                DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
                if (!rxdesc)
                        return -EINVAL;

                rxdesc->callback = rockchip_spi_dma_rxcb;
                rxdesc->callback_param = ctlr;
        }

        txdesc = NULL;
        if (xfer->tx_buf) {
                struct dma_slave_config txconf = {
                        .direction = DMA_MEM_TO_DEV,
                        .dst_addr = rs->dma_addr_tx,
                        .dst_addr_width = rs->n_bytes,
                        .dst_maxburst = rs->fifo_len / 4,
                };

                dmaengine_slave_config(ctlr->dma_tx, &txconf);

                txdesc = dmaengine_prep_slave_sg(
                                ctlr->dma_tx,
                                xfer->tx_sg.sgl, xfer->tx_sg.nents,
                                DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
                if (!txdesc) {
                        if (rxdesc)
                                dmaengine_terminate_sync(ctlr->dma_rx);
                        return -EINVAL;
                }

                txdesc->callback = rockchip_spi_dma_txcb;
                txdesc->callback_param = ctlr;
        }

        /* rx must be started before tx, since spi clocks data in as soon as tx starts */
        if (rxdesc) {
                atomic_or(RXDMA, &rs->state);
                ctlr->dma_rx->cookie = dmaengine_submit(rxdesc);
                dma_async_issue_pending(ctlr->dma_rx);
        }

        if (rs->cs_inactive)
                writel_relaxed(INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);

        spi_enable_chip(rs, true);

        if (txdesc) {
                atomic_or(TXDMA, &rs->state);
                dmaengine_submit(txdesc);
                dma_async_issue_pending(ctlr->dma_tx);
        }

        /* 1 means the transfer is in progress */
        return 1;
}

static int rockchip_spi_config(struct rockchip_spi *rs,
                struct spi_device *spi, struct spi_transfer *xfer,
                bool use_dma, bool slave_mode)
{
        u32 cr0 = CR0_FRF_SPI << CR0_FRF_OFFSET
                | CR0_BHT_8BIT << CR0_BHT_OFFSET
                | CR0_SSD_ONE << CR0_SSD_OFFSET
                | CR0_EM_BIG << CR0_EM_OFFSET;
        u32 cr1;
        u32 dmacr = 0;

        if (slave_mode)
                cr0 |= CR0_OPM_SLAVE << CR0_OPM_OFFSET;
        rs->slave_abort = false;

        cr0 |= rs->rsd << CR0_RSD_OFFSET;
        cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET;
        if (spi->mode & SPI_LSB_FIRST)
                cr0 |= CR0_FBM_LSB << CR0_FBM_OFFSET;
        if (spi->mode & SPI_CS_HIGH)
                cr0 |= BIT(spi->chip_select) << CR0_SOI_OFFSET;

        if (xfer->rx_buf && xfer->tx_buf)
                cr0 |= CR0_XFM_TR << CR0_XFM_OFFSET;
        else if (xfer->rx_buf)
                cr0 |= CR0_XFM_RO << CR0_XFM_OFFSET;
        else if (use_dma)
                cr0 |= CR0_XFM_TO << CR0_XFM_OFFSET;

        switch (xfer->bits_per_word) {
        case 4:
                cr0 |= CR0_DFS_4BIT << CR0_DFS_OFFSET;
                cr1 = xfer->len - 1;
                break;
        case 8:
                cr0 |= CR0_DFS_8BIT << CR0_DFS_OFFSET;
                cr1 = xfer->len - 1;
                break;
        case 16:
                cr0 |= CR0_DFS_16BIT << CR0_DFS_OFFSET;
                cr1 = xfer->len / 2 - 1;
                break;
        default:
                /* we only whitelist 4, 8 and 16 bit words in
                 * ctlr->bits_per_word_mask, so this shouldn't
                 * happen
                 */
                dev_err(rs->dev, "unknown bits per word: %d\n",
                        xfer->bits_per_word);
                return -EINVAL;
        }

        if (use_dma) {
                if (xfer->tx_buf)
                        dmacr |= TF_DMA_EN;
                if (xfer->rx_buf)
                        dmacr |= RF_DMA_EN;
        }

        /*
         * If speed is larger than IO_DRIVER_4MA_MAX_SCLK_OUT,
         * set higher driver strength.
         */
        if (rs->high_speed_state) {
                if (rs->freq > IO_DRIVER_4MA_MAX_SCLK_OUT)
                        pinctrl_select_state(rs->dev->pins->p,
                                             rs->high_speed_state);
                else
                        pinctrl_select_state(rs->dev->pins->p,
                                             rs->dev->pins->default_state);
        }

        writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
        writel_relaxed(cr1, rs->regs + ROCKCHIP_SPI_CTRLR1);

        /* unfortunately setting the fifo threshold level to generate an
         * interrupt exactly when the fifo is full doesn't seem to work,
         * so we need the strict inequality here
         */
        if ((xfer->len / rs->n_bytes) < rs->fifo_len)
                writel_relaxed(xfer->len / rs->n_bytes - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
        else
                writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);

        writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_DMATDLR);
        writel_relaxed(rockchip_spi_calc_burst_size(xfer->len / rs->n_bytes) - 1,
                       rs->regs + ROCKCHIP_SPI_DMARDLR);
        writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);

        if (rs->max_baud_div_in_cpha && xfer->speed_hz != rs->speed_hz) {
                /* the minimum divisor is 2 */
                if (rs->freq < 2 * xfer->speed_hz) {
                        clk_set_rate(rs->spiclk, 2 * xfer->speed_hz);
                        rs->freq = clk_get_rate(rs->spiclk);
                }

                if ((spi->mode & SPI_CPHA) &&
                    (DIV_ROUND_UP(rs->freq, xfer->speed_hz) > rs->max_baud_div_in_cpha)) {
                        clk_set_rate(rs->spiclk, rs->max_baud_div_in_cpha * xfer->speed_hz);
                        rs->freq = clk_get_rate(rs->spiclk);
                }
        }

        /* the hardware only supports an even clock divisor, so
         * round divisor = spiclk / speed up to nearest even number
         * so that the resulting speed is <= the requested speed
         */
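        /*
         * For example, with spiclk at 99 MHz and a requested 10 MHz the
         * divisor becomes 2 * DIV_ROUND_UP(99, 2 * 10) = 10, giving 9.9 MHz.
         */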
        writel_relaxed(2 * DIV_ROUND_UP(rs->freq, 2 * xfer->speed_hz),
                       rs->regs + ROCKCHIP_SPI_BAUDR);
        rs->speed_hz = xfer->speed_hz;

        return 0;
}

static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
{
        return ROCKCHIP_SPI_MAX_TRANLEN;
}

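/*
 * Slave abort: the master stopped mid-transfer. Pause the RX DMA channel to
 * find out how much data actually arrived, drain whatever is still sitting
 * in the RX FIFO, rewrite xfer->len to the number of bytes really received,
 * then tear down both DMA channels and complete the transfer.
 */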
static int rockchip_spi_slave_abort(struct spi_controller *ctlr)
{
        struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
        u32 rx_fifo_left;
        struct dma_tx_state state;
        enum dma_status status;

        /* Get current dma rx point */
        if (atomic_read(&rs->state) & RXDMA) {
                dmaengine_pause(ctlr->dma_rx);
                status = dmaengine_tx_status(ctlr->dma_rx, ctlr->dma_rx->cookie, &state);
                if (status == DMA_ERROR) {
                        rs->rx = rs->xfer->rx_buf;
                        rs->xfer->len = 0;
                        rx_fifo_left = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
                        for (; rx_fifo_left; rx_fifo_left--)
                                readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
                        goto out;
                } else {
                        rs->rx += rs->xfer->len - rs->n_bytes * state.residue;
                }
        }

        /* Drain the valid data left in the rx fifo and set rs->xfer->len to the real rx size */
        if (rs->rx) {
                rx_fifo_left = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
                for (; rx_fifo_left; rx_fifo_left--) {
                        u32 rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);

                        if (rs->n_bytes == 1)
                                *(u8 *)rs->rx = (u8)rxw;
                        else
                                *(u16 *)rs->rx = (u16)rxw;
                        rs->rx += rs->n_bytes;
                }

                rs->xfer->len = (unsigned int)(rs->rx - rs->xfer->rx_buf);
        }

out:
        if (atomic_read(&rs->state) & RXDMA)
                dmaengine_terminate_sync(ctlr->dma_rx);
        if (atomic_read(&rs->state) & TXDMA)
                dmaengine_terminate_sync(ctlr->dma_tx);
        atomic_set(&rs->state, 0);
        spi_enable_chip(rs, false);
        rs->slave_abort = true;
        complete(&ctlr->xfer_completion);

        return 0;
}

static int rockchip_spi_transfer_one(
                struct spi_controller *ctlr,
                struct spi_device *spi,
                struct spi_transfer *xfer)
{
        struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
        int ret;
        bool use_dma;

        /* Zero length transfers won't trigger an interrupt on completion */
        if (!xfer->len) {
                spi_finalize_current_transfer(ctlr);
                return 1;
        }

        WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
                (readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));

        if (!xfer->tx_buf && !xfer->rx_buf) {
                dev_err(rs->dev, "No buffer for transfer\n");
                return -EINVAL;
        }

        if (xfer->len > ROCKCHIP_SPI_MAX_TRANLEN) {
                dev_err(rs->dev, "Transfer is too long (%d)\n", xfer->len);
                return -EINVAL;
        }

        rs->n_bytes = xfer->bits_per_word <= 8 ? 1 : 2;
        rs->xfer = xfer;
        use_dma = ctlr->can_dma ? ctlr->can_dma(ctlr, spi, xfer) : false;

        ret = rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
        if (ret)
                return ret;

        if (use_dma)
                return rockchip_spi_prepare_dma(rs, ctlr, xfer);

        return rockchip_spi_prepare_irq(rs, ctlr, xfer);
}

static bool rockchip_spi_can_dma(struct spi_controller *ctlr,
                                 struct spi_device *spi,
                                 struct spi_transfer *xfer)
{
        struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
        unsigned int bytes_per_word = xfer->bits_per_word <= 8 ? 1 : 2;

        /* if the number of spi words to transfer is less than the fifo
         * length we can just fill the fifo and wait for a single irq,
         * so don't bother setting up dma
         */
        return xfer->len / bytes_per_word >= rs->fifo_len;
}

static int rockchip_spi_setup(struct spi_device *spi)
{
        struct rockchip_spi *rs = spi_controller_get_devdata(spi->controller);
        u32 cr0;

        if (!spi->cs_gpiod && (spi->mode & SPI_CS_HIGH) && !rs->cs_high_supported) {
                dev_warn(&spi->dev, "setup: non-GPIO CS can't be active-high\n");
                return -EINVAL;
        }

        pm_runtime_get_sync(rs->dev);

        cr0 = readl_relaxed(rs->regs + ROCKCHIP_SPI_CTRLR0);

        cr0 |= ((spi->mode & 0x3) << CR0_SCPH_OFFSET);
        if (spi->mode & SPI_CS_HIGH)
                cr0 |= BIT(spi->chip_select) << CR0_SOI_OFFSET;

        writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);

        pm_runtime_put(rs->dev);

        return 0;
}

static int rockchip_spi_misc_open(struct inode *inode, struct file *filp)
{
        struct miscdevice *misc = filp->private_data;
        struct spi_controller *ctlr = dev_get_drvdata(misc->parent);
        struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);

        pm_runtime_get_sync(rs->dev);

        return 0;
}

static int rockchip_spi_misc_release(struct inode *inode, struct file *filp)
{
        struct miscdevice *misc = filp->private_data;
        struct spi_controller *ctlr = dev_get_drvdata(misc->parent);
        struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);

        pm_runtime_put(rs->dev);

        return 0;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) static int rockchip_spi_mmap(struct file *filp, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) struct miscdevice *misc = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) struct spi_controller *ctlr = dev_get_drvdata(misc->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) size_t size = vma->vm_end - vma->vm_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (size > ROCKCHIP_SPI_REGISTER_SIZE) {
		dev_warn(misc->parent, "mmap size exceeds the SPI register region\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
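	/* Map the controller's physical register window, uncached, into the
	 * caller's address space.
	 */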
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) err = remap_pfn_range(vma, vma->vm_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) __phys_to_pfn(rs->base_addr_phy),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) size, vma->vm_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
static const struct file_operations rockchip_spi_misc_fops = {
	.owner = THIS_MODULE,
	.open = rockchip_spi_misc_open,
	.release = rockchip_spi_misc_release,
	.mmap = rockchip_spi_mmap,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) static int rockchip_spi_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) struct rockchip_spi *rs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) struct spi_controller *ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) struct resource *mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) struct device_node *np = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) u32 rsd_nsecs, num_cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) bool slave_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) struct pinctrl *pinctrl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) const struct rockchip_spi_quirks *quirks_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) slave_mode = of_property_read_bool(np, "spi-slave");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (slave_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) ctlr = spi_alloc_slave(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) sizeof(struct rockchip_spi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) ctlr = spi_alloc_master(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) sizeof(struct rockchip_spi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (!ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) platform_set_drvdata(pdev, ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) rs = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) ctlr->slave = slave_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) /* Get basic io resource and map it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) rs->regs = devm_ioremap_resource(&pdev->dev, mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (IS_ERR(rs->regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) ret = PTR_ERR(rs->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) goto err_put_ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) rs->base_addr_phy = mem->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
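	/*
	 * On ACPI systems no clock provider is described, so the clk pointers
	 * are left NULL; the clk API treats a NULL clk as a no-op dummy clock.
	 */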
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (!has_acpi_companion(&pdev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) rs->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (IS_ERR(rs->apb_pclk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) dev_err(&pdev->dev, "Failed to get apb_pclk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) ret = PTR_ERR(rs->apb_pclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) goto err_put_ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (!has_acpi_companion(&pdev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) rs->spiclk = devm_clk_get(&pdev->dev, "spiclk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (IS_ERR(rs->spiclk)) {
		dev_err(&pdev->dev, "Failed to get spiclk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) ret = PTR_ERR(rs->spiclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) goto err_put_ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) rs->sclk_in = devm_clk_get_optional(&pdev->dev, "sclk_in");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (IS_ERR(rs->sclk_in)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) dev_err(&pdev->dev, "Failed to get sclk_in\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) ret = PTR_ERR(rs->sclk_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) goto err_put_ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) ret = clk_prepare_enable(rs->apb_pclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) dev_err(&pdev->dev, "Failed to enable apb_pclk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) goto err_put_ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) ret = clk_prepare_enable(rs->spiclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) dev_err(&pdev->dev, "Failed to enable spi_clk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) goto err_disable_apbclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) ret = clk_prepare_enable(rs->sclk_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) dev_err(&pdev->dev, "Failed to enable sclk_in\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) goto err_disable_spiclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) spi_enable_chip(rs, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) ret = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) goto err_disable_sclk_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) ret = devm_request_threaded_irq(&pdev->dev, ret, rockchip_spi_isr, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) IRQF_ONESHOT, dev_name(&pdev->dev), ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) goto err_disable_sclk_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) rs->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
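	/*
	 * Prefer the actual spiclk rate; if no clock is available (rate 0,
	 * e.g. on ACPI systems), fall back to the "clock-frequency" property.
	 */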
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) rs->freq = clk_get_rate(rs->spiclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (!rs->freq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) ret = device_property_read_u32(&pdev->dev, "clock-frequency", &rs->freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) dev_warn(rs->dev, "Failed to get clock or clock-frequency property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) goto err_disable_sclk_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (!device_property_read_u32(&pdev->dev, "rx-sample-delay-ns", &rsd_nsecs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) /* rx sample delay is expressed in parent clock cycles (max 3) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) u32 rsd = DIV_ROUND_CLOSEST(rsd_nsecs * (rs->freq >> 8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) 1000000000 >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (!rsd) {
			dev_warn(rs->dev, "%u Hz is too slow to express %u ns delay\n",
				 rs->freq, rsd_nsecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) } else if (rsd > CR0_RSD_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) rsd = CR0_RSD_MAX;
			dev_warn(rs->dev, "%u Hz is too fast to express %u ns delay, clamping at %u ns\n",
				 rs->freq, rsd_nsecs,
				 CR0_RSD_MAX * 1000000000U / rs->freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) rs->rsd = rsd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) rs->fifo_len = get_fifo_len(rs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (!rs->fifo_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) dev_err(&pdev->dev, "Failed to get fifo length\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) goto err_disable_sclk_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
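	/* Apply SoC-specific quirks (e.g. the CPHA baud-divider limit) from
	 * the match data, if any.
	 */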
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) quirks_cfg = device_get_match_data(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (quirks_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) rs->max_baud_div_in_cpha = quirks_cfg->max_baud_div_in_cpha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) pm_runtime_set_active(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) pm_runtime_enable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) ctlr->auto_runtime_pm = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) ctlr->bus_num = pdev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_LSB_FIRST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (slave_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) ctlr->mode_bits |= SPI_NO_CS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) ctlr->slave_abort = rockchip_spi_slave_abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) ctlr->flags = SPI_MASTER_GPIO_SS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) ctlr->max_native_cs = ROCKCHIP_SPI_MAX_CS_NUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) /*
		 * Rockchip SPI0 has two native chip selects; SPI1..5 have one only.
		 * If num-cs is missing in the DT, default to 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (device_property_read_u32(&pdev->dev, "num-cs", &num_cs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) num_cs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) ctlr->num_chipselect = num_cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) ctlr->use_gpio_descriptors = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) ctlr->dev.of_node = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) ctlr->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8) | SPI_BPW_MASK(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) ctlr->min_speed_hz = rs->freq / BAUDR_SCKDV_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) ctlr->max_speed_hz = min(rs->freq / BAUDR_SCKDV_MIN, MAX_SCLK_OUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) ctlr->setup = rockchip_spi_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) ctlr->set_cs = rockchip_spi_set_cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) ctlr->transfer_one = rockchip_spi_transfer_one;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) ctlr->max_transfer_size = rockchip_spi_max_transfer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) ctlr->handle_err = rockchip_spi_handle_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
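	/*
	 * DMA channels are optional: fall back to PIO if they cannot be
	 * acquired, but still honour probe deferral.
	 */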
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) ctlr->dma_tx = dma_request_chan(rs->dev, "tx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (IS_ERR(ctlr->dma_tx)) {
		/* Check tx to see if we need to defer probing the driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (PTR_ERR(ctlr->dma_tx) == -EPROBE_DEFER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) ret = -EPROBE_DEFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) goto err_disable_pm_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) dev_warn(rs->dev, "Failed to request TX DMA channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) ctlr->dma_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) ctlr->dma_rx = dma_request_chan(rs->dev, "rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) if (IS_ERR(ctlr->dma_rx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (PTR_ERR(ctlr->dma_rx) == -EPROBE_DEFER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) ret = -EPROBE_DEFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) goto err_free_dma_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) dev_warn(rs->dev, "Failed to request RX DMA channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) ctlr->dma_rx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (ctlr->dma_tx && ctlr->dma_rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) rs->dma_addr_tx = mem->start + ROCKCHIP_SPI_TXDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) rs->dma_addr_rx = mem->start + ROCKCHIP_SPI_RXDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) ctlr->can_dma = rockchip_spi_can_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
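	/*
	 * Feature detection by IP version: only VER2_TYPE2 supports a native
	 * active-high chip select, and only then is CS-inactive detection
	 * used for DMA transfers in slave mode.
	 */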
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) switch (readl_relaxed(rs->regs + ROCKCHIP_SPI_VERSION)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) case ROCKCHIP_SPI_VER2_TYPE2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) rs->cs_high_supported = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) ctlr->mode_bits |= SPI_CS_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (ctlr->can_dma && slave_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) rs->cs_inactive = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) rs->cs_inactive = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) rs->cs_inactive = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
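	/* Look up the optional "high_speed" pinctrl state; it is not an
	 * error if the DT does not provide one.
	 */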
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) pinctrl = devm_pinctrl_get(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (!IS_ERR(pinctrl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) rs->high_speed_state = pinctrl_lookup_state(pinctrl, "high_speed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (IS_ERR_OR_NULL(rs->high_speed_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) dev_warn(&pdev->dev, "no high_speed pinctrl state\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) rs->high_speed_state = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) ret = devm_spi_register_controller(&pdev->dev, ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) dev_err(&pdev->dev, "Failed to register controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) goto err_free_dma_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
	if (IS_ENABLED(CONFIG_SPI_ROCKCHIP_MISCDEV)) {
		const char *misc_name;

		/*
		 * The miscdevice keeps a pointer to its name (it is read
		 * again later, e.g. via /proc/misc), so allocate it with
		 * device-managed memory rather than a stack buffer.
		 */
		misc_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
					   "rkspi-dev%d", ctlr->bus_num);
		if (!misc_name) {
			dev_warn(&pdev->dev, "failed to allocate misc device name\n");
		} else {
			rs->miscdev.minor = MISC_DYNAMIC_MINOR;
			rs->miscdev.name = misc_name;
			rs->miscdev.fops = &rockchip_spi_misc_fops;
			rs->miscdev.parent = &pdev->dev;

			ret = misc_register(&rs->miscdev);
			if (ret)
				dev_err(&pdev->dev, "failed to register misc device %s\n", misc_name);
			else
				dev_info(&pdev->dev, "registered misc device %s\n", misc_name);
		}
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) err_free_dma_rx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (ctlr->dma_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) dma_release_channel(ctlr->dma_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) err_free_dma_tx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) if (ctlr->dma_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) dma_release_channel(ctlr->dma_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) err_disable_pm_runtime:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) err_disable_sclk_in:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) clk_disable_unprepare(rs->sclk_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) err_disable_spiclk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) clk_disable_unprepare(rs->spiclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) err_disable_apbclk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) clk_disable_unprepare(rs->apb_pclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) err_put_ctlr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) spi_controller_put(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) static int rockchip_spi_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) struct spi_controller *ctlr = spi_controller_get(platform_get_drvdata(pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (IS_ENABLED(CONFIG_SPI_ROCKCHIP_MISCDEV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) misc_deregister(&rs->miscdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) pm_runtime_get_sync(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) clk_disable_unprepare(rs->sclk_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) clk_disable_unprepare(rs->spiclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) clk_disable_unprepare(rs->apb_pclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) pm_runtime_put_noidle(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) pm_runtime_set_suspended(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (ctlr->dma_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) dma_release_channel(ctlr->dma_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (ctlr->dma_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) dma_release_channel(ctlr->dma_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) spi_controller_put(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) #ifdef CONFIG_PM
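/* Runtime PM: gate the APB and SPI clocks while the controller is idle. */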
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) static int rockchip_spi_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) struct spi_controller *ctlr = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) clk_disable_unprepare(rs->spiclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) clk_disable_unprepare(rs->apb_pclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) static int rockchip_spi_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) struct spi_controller *ctlr = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) ret = clk_prepare_enable(rs->apb_pclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) ret = clk_prepare_enable(rs->spiclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) clk_disable_unprepare(rs->apb_pclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) #endif /* CONFIG_PM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) #ifdef CONFIG_PM_SLEEP
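/*
 * System sleep: quiesce the SPI core, drop the clocks unless the device is
 * already runtime-suspended, and switch the pins to their sleep state.
 */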
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) static int rockchip_spi_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) struct spi_controller *ctlr = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) ret = spi_controller_suspend(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) /* Avoid redundant clock disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) if (!pm_runtime_status_suspended(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) rockchip_spi_runtime_suspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) pinctrl_pm_select_sleep_state(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) static int rockchip_spi_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) struct spi_controller *ctlr = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) pinctrl_pm_select_default_state(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) if (!pm_runtime_status_suspended(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) ret = rockchip_spi_runtime_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) ret = spi_controller_resume(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) rockchip_spi_runtime_suspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) #endif /* CONFIG_PM_SLEEP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) static const struct dev_pm_ops rockchip_spi_pm = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) SET_RUNTIME_PM_OPS(rockchip_spi_runtime_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) rockchip_spi_runtime_resume, NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) static const struct rockchip_spi_quirks rockchip_spi_quirks_cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) .max_baud_div_in_cpha = 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) static const struct of_device_id rockchip_spi_dt_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) .compatible = "rockchip,px30-spi",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) .data = &rockchip_spi_quirks_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) { .compatible = "rockchip,rk3036-spi", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) { .compatible = "rockchip,rk3066-spi", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) { .compatible = "rockchip,rk3188-spi", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) { .compatible = "rockchip,rk3228-spi", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) { .compatible = "rockchip,rk3288-spi", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) { .compatible = "rockchip,rk3308-spi", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) { .compatible = "rockchip,rk3328-spi", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) { .compatible = "rockchip,rk3368-spi", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) { .compatible = "rockchip,rk3399-spi", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) { .compatible = "rockchip,rv1106-spi", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) { .compatible = "rockchip,rv1108-spi", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) { .compatible = "rockchip,rv1126-spi", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) static struct platform_driver rockchip_spi_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) .name = DRIVER_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) .pm = &rockchip_spi_pm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) .of_match_table = of_match_ptr(rockchip_spi_dt_match),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) .probe = rockchip_spi_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) .remove = rockchip_spi_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) module_platform_driver(rockchip_spi_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) MODULE_DESCRIPTION("ROCKCHIP SPI Controller Driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) MODULE_LICENSE("GPL v2");