// SPDX-License-Identifier: GPL-2.0-only
/*
 * SPI driver for NVIDIA's Tegra114 SPI Controller.
 *
 * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>

#define SPI_COMMAND1			0x000
#define SPI_BIT_LENGTH(x)		(((x) & 0x1f) << 0)
#define SPI_PACKED			(1 << 5)
#define SPI_TX_EN			(1 << 11)
#define SPI_RX_EN			(1 << 12)
#define SPI_BOTH_EN_BYTE		(1 << 13)
#define SPI_BOTH_EN_BIT			(1 << 14)
#define SPI_LSBYTE_FE			(1 << 15)
#define SPI_LSBIT_FE			(1 << 16)
#define SPI_BIDIROE			(1 << 17)
#define SPI_IDLE_SDA_DRIVE_LOW		(0 << 18)
#define SPI_IDLE_SDA_DRIVE_HIGH		(1 << 18)
#define SPI_IDLE_SDA_PULL_LOW		(2 << 18)
#define SPI_IDLE_SDA_PULL_HIGH		(3 << 18)
#define SPI_IDLE_SDA_MASK		(3 << 18)
#define SPI_CS_SW_VAL			(1 << 20)
#define SPI_CS_SW_HW			(1 << 21)
/* SPI_CS_POL_INACTIVE bits are default high */
/* n from 0 to 3 */
#define SPI_CS_POL_INACTIVE(n)		(1 << (22 + (n)))
#define SPI_CS_POL_INACTIVE_MASK	(0xF << 22)

#define SPI_CS_SEL_0			(0 << 26)
#define SPI_CS_SEL_1			(1 << 26)
#define SPI_CS_SEL_2			(2 << 26)
#define SPI_CS_SEL_3			(3 << 26)
#define SPI_CS_SEL_MASK			(3 << 26)
#define SPI_CS_SEL(x)			(((x) & 0x3) << 26)
#define SPI_CONTROL_MODE_0		(0 << 28)
#define SPI_CONTROL_MODE_1		(1 << 28)
#define SPI_CONTROL_MODE_2		(2 << 28)
#define SPI_CONTROL_MODE_3		(3 << 28)
#define SPI_CONTROL_MODE_MASK		(3 << 28)
#define SPI_MODE_SEL(x)			(((x) & 0x3) << 28)
#define SPI_M_S				(1 << 30)
#define SPI_PIO				(1 << 31)

#define SPI_COMMAND2			0x004
#define SPI_TX_TAP_DELAY(x)		(((x) & 0x3F) << 6)
#define SPI_RX_TAP_DELAY(x)		(((x) & 0x3F) << 0)

#define SPI_CS_TIMING1			0x008
#define SPI_SETUP_HOLD(setup, hold)	(((setup) << 4) | (hold))
#define SPI_CS_SETUP_HOLD(reg, cs, val)			\
		((((val) & 0xFFu) << ((cs) * 8)) |	\
		 ((reg) & ~(0xFFu << ((cs) * 8))))

#define SPI_CS_TIMING2			0x00C
#define CYCLES_BETWEEN_PACKETS_0(x)	(((x) & 0x1F) << 0)
#define CS_ACTIVE_BETWEEN_PACKETS_0	(1 << 5)
#define CYCLES_BETWEEN_PACKETS_1(x)	(((x) & 0x1F) << 8)
#define CS_ACTIVE_BETWEEN_PACKETS_1	(1 << 13)
#define CYCLES_BETWEEN_PACKETS_2(x)	(((x) & 0x1F) << 16)
#define CS_ACTIVE_BETWEEN_PACKETS_2	(1 << 21)
#define CYCLES_BETWEEN_PACKETS_3(x)	(((x) & 0x1F) << 24)
#define CS_ACTIVE_BETWEEN_PACKETS_3	(1 << 29)
#define SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(reg, cs, val)		\
		(reg = (((val) & 0x1) << ((cs) * 8 + 5)) |	\
			((reg) & ~(1 << ((cs) * 8 + 5))))
#define SPI_SET_CYCLES_BETWEEN_PACKETS(reg, cs, val)		\
		(reg = (((val) & 0x1F) << ((cs) * 8)) |		\
			((reg) & ~(0x1F << ((cs) * 8))))
#define MAX_SETUP_HOLD_CYCLES		16
#define MAX_INACTIVE_CYCLES		32

#define SPI_TRANS_STATUS		0x010
#define SPI_BLK_CNT(val)		(((val) >> 0) & 0xFFFF)
#define SPI_SLV_IDLE_COUNT(val)		(((val) >> 16) & 0xFF)
#define SPI_RDY				(1 << 30)

#define SPI_FIFO_STATUS			0x014
#define SPI_RX_FIFO_EMPTY		(1 << 0)
#define SPI_RX_FIFO_FULL		(1 << 1)
#define SPI_TX_FIFO_EMPTY		(1 << 2)
#define SPI_TX_FIFO_FULL		(1 << 3)
#define SPI_RX_FIFO_UNF			(1 << 4)
#define SPI_RX_FIFO_OVF			(1 << 5)
#define SPI_TX_FIFO_UNF			(1 << 6)
#define SPI_TX_FIFO_OVF			(1 << 7)
#define SPI_ERR				(1 << 8)
#define SPI_TX_FIFO_FLUSH		(1 << 14)
#define SPI_RX_FIFO_FLUSH		(1 << 15)
#define SPI_TX_FIFO_EMPTY_COUNT(val)	(((val) >> 16) & 0x7F)
#define SPI_RX_FIFO_FULL_COUNT(val)	(((val) >> 23) & 0x7F)
#define SPI_FRAME_END			(1 << 30)
#define SPI_CS_INACTIVE			(1 << 31)

#define SPI_FIFO_ERROR			(SPI_RX_FIFO_UNF | \
			SPI_RX_FIFO_OVF | SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF)
#define SPI_FIFO_EMPTY			(SPI_RX_FIFO_EMPTY | SPI_TX_FIFO_EMPTY)

#define SPI_TX_DATA			0x018
#define SPI_RX_DATA			0x01C

#define SPI_DMA_CTL			0x020
#define SPI_TX_TRIG_1			(0 << 15)
#define SPI_TX_TRIG_4			(1 << 15)
#define SPI_TX_TRIG_8			(2 << 15)
#define SPI_TX_TRIG_16			(3 << 15)
#define SPI_TX_TRIG_MASK		(3 << 15)
#define SPI_RX_TRIG_1			(0 << 19)
#define SPI_RX_TRIG_4			(1 << 19)
#define SPI_RX_TRIG_8			(2 << 19)
#define SPI_RX_TRIG_16			(3 << 19)
#define SPI_RX_TRIG_MASK		(3 << 19)
#define SPI_IE_TX			(1 << 28)
#define SPI_IE_RX			(1 << 29)
#define SPI_CONT			(1 << 30)
#define SPI_DMA				(1 << 31)
#define SPI_DMA_EN			SPI_DMA

#define SPI_DMA_BLK			0x024
#define SPI_DMA_BLK_SET(x)		(((x) & 0xFFFF) << 0)

#define SPI_TX_FIFO			0x108
#define SPI_RX_FIFO			0x188
#define SPI_INTR_MASK			0x18c
#define SPI_INTR_ALL_MASK		(0x1fUL << 25)
#define MAX_CHIP_SELECT			4
#define SPI_FIFO_DEPTH			64
#define DATA_DIR_TX			(1 << 0)
#define DATA_DIR_RX			(1 << 1)

#define SPI_DMA_TIMEOUT			(msecs_to_jiffies(1000))
#define DEFAULT_SPI_DMA_BUF_LEN		(16*1024)
#define TX_FIFO_EMPTY_COUNT_MAX		SPI_TX_FIFO_EMPTY_COUNT(0x40)
#define RX_FIFO_FULL_COUNT_ZERO		SPI_RX_FIFO_FULL_COUNT(0)
#define MAX_HOLD_CYCLES			16
#define SPI_DEFAULT_SPEED		25000000

struct tegra_spi_soc_data {
	bool has_intr_mask_reg;
};

struct tegra_spi_client_data {
	int tx_clk_tap_delay;
	int rx_clk_tap_delay;
};

struct tegra_spi_data {
	struct device *dev;
	struct spi_master *master;
	spinlock_t lock;

	struct clk *clk;
	struct reset_control *rst;
	void __iomem *base;
	phys_addr_t phys;
	unsigned irq;
	u32 cur_speed;

	struct spi_device *cur_spi;
	struct spi_device *cs_control;
	unsigned cur_pos;
	unsigned words_per_32bit;
	unsigned bytes_per_word;
	unsigned curr_dma_words;
	unsigned cur_direction;

	unsigned cur_rx_pos;
	unsigned cur_tx_pos;

	unsigned dma_buf_size;
	unsigned max_buf_size;
	bool is_curr_dma_xfer;
	bool use_hw_based_cs;

	struct completion rx_dma_complete;
	struct completion tx_dma_complete;

	u32 tx_status;
	u32 rx_status;
	u32 status_reg;
	bool is_packed;

	u32 command1_reg;
	u32 dma_control_reg;
	u32 def_command1_reg;
	u32 def_command2_reg;
	u32 spi_cs_timing1;
	u32 spi_cs_timing2;
	u8 last_used_cs;

	struct completion xfer_completion;
	struct spi_transfer *curr_xfer;
	struct dma_chan *rx_dma_chan;
	u32 *rx_dma_buf;
	dma_addr_t rx_dma_phys;
	struct dma_async_tx_descriptor *rx_dma_desc;

	struct dma_chan *tx_dma_chan;
	u32 *tx_dma_buf;
	dma_addr_t tx_dma_phys;
	struct dma_async_tx_descriptor *tx_dma_desc;
	const struct tegra_spi_soc_data *soc_data;
};

static int tegra_spi_runtime_suspend(struct device *dev);
static int tegra_spi_runtime_resume(struct device *dev);

static inline u32 tegra_spi_readl(struct tegra_spi_data *tspi,
				  unsigned long reg)
{
	return readl(tspi->base + reg);
}

static inline void tegra_spi_writel(struct tegra_spi_data *tspi,
				    u32 val, unsigned long reg)
{
	writel(val, tspi->base + reg);

	/* Read back register to make sure that register writes completed */
	if (reg != SPI_TX_FIFO)
		readl(tspi->base + SPI_COMMAND1);
}

static void tegra_spi_clear_status(struct tegra_spi_data *tspi)
{
	u32 val;

	/* Write 1 to clear status register */
	val = tegra_spi_readl(tspi, SPI_TRANS_STATUS);
	tegra_spi_writel(tspi, val, SPI_TRANS_STATUS);

	/* Clear fifo status error if any */
	val = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
	if (val & SPI_ERR)
		tegra_spi_writel(tspi, SPI_ERR | SPI_FIFO_ERROR,
				 SPI_FIFO_STATUS);
}

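/*
 * Compute the parameters for the current chunk of a transfer: packed mode
 * is used for 8/16/32-bit words on transfers longer than 3 bytes, and
 * bytes_per_word, words_per_32bit and curr_dma_words are updated
 * accordingly. Returns the number of 32-bit FIFO words this chunk needs.
 */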
static unsigned tegra_spi_calculate_curr_xfer_param(
	struct spi_device *spi, struct tegra_spi_data *tspi,
	struct spi_transfer *t)
{
	unsigned remain_len = t->len - tspi->cur_pos;
	unsigned max_word;
	unsigned bits_per_word = t->bits_per_word;
	unsigned max_len;
	unsigned total_fifo_words;

	tspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);

	if ((bits_per_word == 8 || bits_per_word == 16 ||
	     bits_per_word == 32) && t->len > 3) {
		tspi->is_packed = true;
		tspi->words_per_32bit = 32/bits_per_word;
	} else {
		tspi->is_packed = false;
		tspi->words_per_32bit = 1;
	}

	if (tspi->is_packed) {
		max_len = min(remain_len, tspi->max_buf_size);
		tspi->curr_dma_words = max_len/tspi->bytes_per_word;
		total_fifo_words = (max_len + 3) / 4;
	} else {
		max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
		max_word = min(max_word, tspi->max_buf_size/4);
		tspi->curr_dma_words = max_word;
		total_fifo_words = max_word;
	}
	return total_fifo_words;
}

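/*
 * PIO path: copy data from the client TX buffer into the TX FIFO, packing
 * bytes into 32-bit FIFO words as needed, and return the number of SPI
 * words written so the caller can advance the transfer position.
 */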
static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
	struct tegra_spi_data *tspi, struct spi_transfer *t)
{
	unsigned nbytes;
	unsigned tx_empty_count;
	u32 fifo_status;
	unsigned max_n_32bit;
	unsigned i, count;
	unsigned int written_words;
	unsigned fifo_words_left;
	u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;

	fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
	tx_empty_count = SPI_TX_FIFO_EMPTY_COUNT(fifo_status);

	if (tspi->is_packed) {
		fifo_words_left = tx_empty_count * tspi->words_per_32bit;
		written_words = min(fifo_words_left, tspi->curr_dma_words);
		nbytes = written_words * tspi->bytes_per_word;
		max_n_32bit = DIV_ROUND_UP(nbytes, 4);
		for (count = 0; count < max_n_32bit; count++) {
			u32 x = 0;

			for (i = 0; (i < 4) && nbytes; i++, nbytes--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tegra_spi_writel(tspi, x, SPI_TX_FIFO);
		}

		tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
	} else {
		unsigned int write_bytes;

		max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
		written_words = max_n_32bit;
		nbytes = written_words * tspi->bytes_per_word;
		if (nbytes > t->len - tspi->cur_pos)
			nbytes = t->len - tspi->cur_pos;
		write_bytes = nbytes;
		for (count = 0; count < max_n_32bit; count++) {
			u32 x = 0;

			for (i = 0; nbytes && (i < tspi->bytes_per_word);
							i++, nbytes--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tegra_spi_writel(tspi, x, SPI_TX_FIFO);
		}

		tspi->cur_tx_pos += write_bytes;
	}

	return written_words;
}

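/*
 * PIO path: drain the RX FIFO into the client RX buffer, unpacking each
 * 32-bit FIFO word into client words, and return the number of SPI words
 * read from the FIFO.
 */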
static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
		struct tegra_spi_data *tspi, struct spi_transfer *t)
{
	unsigned rx_full_count;
	u32 fifo_status;
	unsigned i, count;
	unsigned int read_words = 0;
	unsigned len;
	u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;

	fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
	rx_full_count = SPI_RX_FIFO_FULL_COUNT(fifo_status);
	if (tspi->is_packed) {
		len = tspi->curr_dma_words * tspi->bytes_per_word;
		for (count = 0; count < rx_full_count; count++) {
			u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO);

			for (i = 0; len && (i < 4); i++, len--)
				*rx_buf++ = (x >> i*8) & 0xFF;
		}
		read_words += tspi->curr_dma_words;
		tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
	} else {
		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
		u8 bytes_per_word = tspi->bytes_per_word;
		unsigned int read_bytes;

		len = rx_full_count * bytes_per_word;
		if (len > t->len - tspi->cur_pos)
			len = t->len - tspi->cur_pos;
		read_bytes = len;
		for (count = 0; count < rx_full_count; count++) {
			u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO) & rx_mask;

			for (i = 0; len && (i < bytes_per_word); i++, len--)
				*rx_buf++ = (x >> (i*8)) & 0xFF;
		}
		read_words += rx_full_count;
		tspi->cur_rx_pos += read_bytes;
	}

	return read_words;
}

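/*
 * DMA path: stage the next chunk of the client TX buffer into the TX bounce
 * buffer, packing words when not in packed mode, with the required CPU and
 * device cache syncs around the copy.
 */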
static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
		struct tegra_spi_data *tspi, struct spi_transfer *t)
{
	/* Make the DMA buffer readable by the CPU */
	dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
				tspi->dma_buf_size, DMA_TO_DEVICE);

	if (tspi->is_packed) {
		unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;

		memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
		tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
	} else {
		unsigned int i;
		unsigned int count;
		u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
		unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
		unsigned int write_bytes;

		if (consume > t->len - tspi->cur_pos)
			consume = t->len - tspi->cur_pos;
		write_bytes = consume;
		for (count = 0; count < tspi->curr_dma_words; count++) {
			u32 x = 0;

			for (i = 0; consume && (i < tspi->bytes_per_word);
							i++, consume--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tspi->tx_dma_buf[count] = x;
		}

		tspi->cur_tx_pos += write_bytes;
	}

	/* Make the DMA buffer readable by the DMA engine */
	dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
				   tspi->dma_buf_size, DMA_TO_DEVICE);
}

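/*
 * DMA path: copy the data received into the RX bounce buffer back to the
 * client RX buffer, masking and unpacking words when not in packed mode.
 */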
static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf(
		struct tegra_spi_data *tspi, struct spi_transfer *t)
{
	/* Make the DMA buffer readable by the CPU */
	dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
				tspi->dma_buf_size, DMA_FROM_DEVICE);

	if (tspi->is_packed) {
		unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;

		memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
		tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
	} else {
		unsigned int i;
		unsigned int count;
		unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
		unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
		unsigned int read_bytes;

		if (consume > t->len - tspi->cur_pos)
			consume = t->len - tspi->cur_pos;
		read_bytes = consume;
		for (count = 0; count < tspi->curr_dma_words; count++) {
			u32 x = tspi->rx_dma_buf[count] & rx_mask;

			for (i = 0; consume && (i < tspi->bytes_per_word);
							i++, consume--)
				*rx_buf++ = (x >> (i*8)) & 0xFF;
		}

		tspi->cur_rx_pos += read_bytes;
	}

	/* Make the DMA buffer readable by the DMA engine */
	dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
				   tspi->dma_buf_size, DMA_FROM_DEVICE);
}

static void tegra_spi_dma_complete(void *args)
{
	struct completion *dma_complete = args;

	complete(dma_complete);
}

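/*
 * Prepare and issue a MEM_TO_DEV slave transfer of len bytes from the TX
 * bounce buffer; completion is signalled through tx_dma_complete.
 */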
static int tegra_spi_start_tx_dma(struct tegra_spi_data *tspi, int len)
{
	reinit_completion(&tspi->tx_dma_complete);
	tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
				tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tspi->tx_dma_desc) {
		dev_err(tspi->dev, "Not able to get desc for Tx\n");
		return -EIO;
	}

	tspi->tx_dma_desc->callback = tegra_spi_dma_complete;
	tspi->tx_dma_desc->callback_param = &tspi->tx_dma_complete;

	dmaengine_submit(tspi->tx_dma_desc);
	dma_async_issue_pending(tspi->tx_dma_chan);
	return 0;
}

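/*
 * Prepare and issue a DEV_TO_MEM slave transfer of len bytes into the RX
 * bounce buffer; completion is signalled through rx_dma_complete.
 */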
static int tegra_spi_start_rx_dma(struct tegra_spi_data *tspi, int len)
{
	reinit_completion(&tspi->rx_dma_complete);
	tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
				tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tspi->rx_dma_desc) {
		dev_err(tspi->dev, "Not able to get desc for Rx\n");
		return -EIO;
	}

	tspi->rx_dma_desc->callback = tegra_spi_dma_complete;
	tspi->rx_dma_desc->callback_param = &tspi->rx_dma_complete;

	dmaengine_submit(tspi->rx_dma_desc);
	dma_async_issue_pending(tspi->rx_dma_chan);
	return 0;
}

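/*
 * If either FIFO is not empty, request a flush of both FIFOs and poll the
 * status register until they drain, giving up after roughly one second.
 */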
static int tegra_spi_flush_fifos(struct tegra_spi_data *tspi)
{
	unsigned long timeout = jiffies + HZ;
	u32 status;

	status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
	if ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
		status |= SPI_RX_FIFO_FLUSH | SPI_TX_FIFO_FLUSH;
		tegra_spi_writel(tspi, status, SPI_FIFO_STATUS);
		while ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
			status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
			if (time_after(jiffies, timeout)) {
				dev_err(tspi->dev,
					"timeout waiting for fifo flush\n");
				return -EIO;
			}

			udelay(1);
		}
	}

	return 0;
}

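/*
 * Program the block count, FIFO trigger levels and DMA burst size for the
 * current chunk, configure and start the TX and/or RX slave DMA channels,
 * and finally set SPI_DMA_EN to kick off the hardware transfer.
 */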
static int tegra_spi_start_dma_based_transfer(
		struct tegra_spi_data *tspi, struct spi_transfer *t)
{
	u32 val;
	unsigned int len;
	int ret = 0;
	u8 dma_burst;
	struct dma_slave_config dma_sconfig = {0};

	val = SPI_DMA_BLK_SET(tspi->curr_dma_words - 1);
	tegra_spi_writel(tspi, val, SPI_DMA_BLK);

	if (tspi->is_packed)
		len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
				   4) * 4;
	else
		len = tspi->curr_dma_words * 4;

	/* Set attention level based on length of transfer */
	if (len & 0xF) {
		val |= SPI_TX_TRIG_1 | SPI_RX_TRIG_1;
		dma_burst = 1;
	} else if (((len) >> 4) & 0x1) {
		val |= SPI_TX_TRIG_4 | SPI_RX_TRIG_4;
		dma_burst = 4;
	} else {
		val |= SPI_TX_TRIG_8 | SPI_RX_TRIG_8;
		dma_burst = 8;
	}

	if (!tspi->soc_data->has_intr_mask_reg) {
		if (tspi->cur_direction & DATA_DIR_TX)
			val |= SPI_IE_TX;

		if (tspi->cur_direction & DATA_DIR_RX)
			val |= SPI_IE_RX;
	}

	tegra_spi_writel(tspi, val, SPI_DMA_CTL);
	tspi->dma_control_reg = val;

	dma_sconfig.device_fc = true;
	if (tspi->cur_direction & DATA_DIR_TX) {
		dma_sconfig.dst_addr = tspi->phys + SPI_TX_FIFO;
		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dma_sconfig.dst_maxburst = dma_burst;
		ret = dmaengine_slave_config(tspi->tx_dma_chan, &dma_sconfig);
		if (ret < 0) {
			dev_err(tspi->dev,
				"DMA slave config failed: %d\n", ret);
			return ret;
		}

		tegra_spi_copy_client_txbuf_to_spi_txbuf(tspi, t);
		ret = tegra_spi_start_tx_dma(tspi, len);
		if (ret < 0) {
			dev_err(tspi->dev,
				"Starting tx dma failed, err %d\n", ret);
			return ret;
		}
	}

	if (tspi->cur_direction & DATA_DIR_RX) {
		dma_sconfig.src_addr = tspi->phys + SPI_RX_FIFO;
		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dma_sconfig.src_maxburst = dma_burst;
		ret = dmaengine_slave_config(tspi->rx_dma_chan, &dma_sconfig);
		if (ret < 0) {
			dev_err(tspi->dev,
				"DMA slave config failed: %d\n", ret);
			return ret;
		}

		/* Make the DMA buffer readable by the DMA engine */
		dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
					   tspi->dma_buf_size, DMA_FROM_DEVICE);

		ret = tegra_spi_start_rx_dma(tspi, len);
		if (ret < 0) {
			dev_err(tspi->dev,
				"Starting rx dma failed, err %d\n", ret);
			if (tspi->cur_direction & DATA_DIR_TX)
				dmaengine_terminate_all(tspi->tx_dma_chan);
			return ret;
		}
	}
	tspi->is_curr_dma_xfer = true;
	tspi->dma_control_reg = val;

	val |= SPI_DMA_EN;
	tegra_spi_writel(tspi, val, SPI_DMA_CTL);
	return ret;
}

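/*
 * PIO (interrupt-driven) path: preload the TX FIFO, program the block count
 * and the TX/RX interrupt enables, then set SPI_PIO in COMMAND1 to start.
 */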
static int tegra_spi_start_cpu_based_transfer(
		struct tegra_spi_data *tspi, struct spi_transfer *t)
{
	u32 val;
	unsigned cur_words;

	if (tspi->cur_direction & DATA_DIR_TX)
		cur_words = tegra_spi_fill_tx_fifo_from_client_txbuf(tspi, t);
	else
		cur_words = tspi->curr_dma_words;

	val = SPI_DMA_BLK_SET(cur_words - 1);
	tegra_spi_writel(tspi, val, SPI_DMA_BLK);

	val = 0;
	if (tspi->cur_direction & DATA_DIR_TX)
		val |= SPI_IE_TX;

	if (tspi->cur_direction & DATA_DIR_RX)
		val |= SPI_IE_RX;

	tegra_spi_writel(tspi, val, SPI_DMA_CTL);
	tspi->dma_control_reg = val;

	tspi->is_curr_dma_xfer = false;

	val = tspi->command1_reg;
	val |= SPI_PIO;
	tegra_spi_writel(tspi, val, SPI_COMMAND1);
	return 0;
}

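/*
 * Request the "rx" or "tx" DMA channel and allocate the coherent bounce
 * buffer used for DMA transfers; the channel is released again on failure.
 */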
static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
				    bool dma_to_memory)
{
	struct dma_chan *dma_chan;
	u32 *dma_buf;
	dma_addr_t dma_phys;

	dma_chan = dma_request_chan(tspi->dev, dma_to_memory ? "rx" : "tx");
	if (IS_ERR(dma_chan))
		return dev_err_probe(tspi->dev, PTR_ERR(dma_chan),
				     "Dma channel is not available\n");

	dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
				     &dma_phys, GFP_KERNEL);
	if (!dma_buf) {
		dev_err(tspi->dev, "Not able to allocate the dma buffer\n");
		dma_release_channel(dma_chan);
		return -ENOMEM;
	}

	if (dma_to_memory) {
		tspi->rx_dma_chan = dma_chan;
		tspi->rx_dma_buf = dma_buf;
		tspi->rx_dma_phys = dma_phys;
	} else {
		tspi->tx_dma_chan = dma_chan;
		tspi->tx_dma_buf = dma_buf;
		tspi->tx_dma_phys = dma_phys;
	}
	return 0;
}

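/*
 * Undo tegra_spi_init_dma_param() for one direction: clear the cached
 * pointers, then free the bounce buffer and release the DMA channel.
 */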
static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
					bool dma_to_memory)
{
	u32 *dma_buf;
	dma_addr_t dma_phys;
	struct dma_chan *dma_chan;

	if (dma_to_memory) {
		dma_buf = tspi->rx_dma_buf;
		dma_chan = tspi->rx_dma_chan;
		dma_phys = tspi->rx_dma_phys;
		tspi->rx_dma_chan = NULL;
		tspi->rx_dma_buf = NULL;
	} else {
		dma_buf = tspi->tx_dma_buf;
		dma_chan = tspi->tx_dma_chan;
		dma_phys = tspi->tx_dma_phys;
		tspi->tx_dma_buf = NULL;
		tspi->tx_dma_chan = NULL;
	}
	if (!dma_chan)
		return;

	dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
	dma_release_channel(dma_chan);
}

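/*
 * Program the per-chip-select setup and hold cycles (SPI_CS_TIMING1) and
 * the inter-packet inactive cycles and CS state (SPI_CS_TIMING2). Only
 * delays given in SPI_DELAY_UNIT_SCK are accepted.
 */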
static int tegra_spi_set_hw_cs_timing(struct spi_device *spi,
				      struct spi_delay *setup,
				      struct spi_delay *hold,
				      struct spi_delay *inactive)
{
	struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
	u8 setup_dly, hold_dly, inactive_dly;
	u32 setup_hold;
	u32 spi_cs_timing;
	u32 inactive_cycles;
	u8 cs_state;

	if ((setup && setup->unit != SPI_DELAY_UNIT_SCK) ||
	    (hold && hold->unit != SPI_DELAY_UNIT_SCK) ||
	    (inactive && inactive->unit != SPI_DELAY_UNIT_SCK)) {
		dev_err(&spi->dev,
			"Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n",
			SPI_DELAY_UNIT_SCK);
		return -EINVAL;
	}

	setup_dly = setup ? setup->value : 0;
	hold_dly = hold ? hold->value : 0;
	inactive_dly = inactive ? inactive->value : 0;

	setup_dly = min_t(u8, setup_dly, MAX_SETUP_HOLD_CYCLES);
	hold_dly = min_t(u8, hold_dly, MAX_SETUP_HOLD_CYCLES);
	if (setup_dly && hold_dly) {
		setup_hold = SPI_SETUP_HOLD(setup_dly - 1, hold_dly - 1);
		spi_cs_timing = SPI_CS_SETUP_HOLD(tspi->spi_cs_timing1,
						  spi->chip_select,
						  setup_hold);
		if (tspi->spi_cs_timing1 != spi_cs_timing) {
			tspi->spi_cs_timing1 = spi_cs_timing;
			tegra_spi_writel(tspi, spi_cs_timing, SPI_CS_TIMING1);
		}
	}

	inactive_cycles = min_t(u8, inactive_dly, MAX_INACTIVE_CYCLES);
	if (inactive_cycles)
		inactive_cycles--;
	cs_state = inactive_cycles ? 0 : 1;
	spi_cs_timing = tspi->spi_cs_timing2;
	SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(spi_cs_timing, spi->chip_select,
					  cs_state);
	SPI_SET_CYCLES_BETWEEN_PACKETS(spi_cs_timing, spi->chip_select,
				       inactive_cycles);
	if (tspi->spi_cs_timing2 != spi_cs_timing) {
		tspi->spi_cs_timing2 = spi_cs_timing;
		tegra_spi_writel(tspi, spi_cs_timing, SPI_CS_TIMING2);
	}

	return 0;
}

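/*
 * Build the COMMAND1 value for one transfer: update the bus clock if the
 * requested speed changed, reset the transfer bookkeeping, and on the first
 * transfer of a message program the SPI mode, bit order, 3-wire mode and
 * chip-select handling.
 */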
static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
					struct spi_transfer *t,
					bool is_first_of_msg,
					bool is_single_xfer)
{
	struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
	struct tegra_spi_client_data *cdata = spi->controller_data;
	u32 speed = t->speed_hz;
	u8 bits_per_word = t->bits_per_word;
	u32 command1, command2;
	int req_mode;
	u32 tx_tap = 0, rx_tap = 0;

	if (speed != tspi->cur_speed) {
		clk_set_rate(tspi->clk, speed);
		tspi->cur_speed = speed;
	}

	tspi->cur_spi = spi;
	tspi->cur_pos = 0;
	tspi->cur_rx_pos = 0;
	tspi->cur_tx_pos = 0;
	tspi->curr_xfer = t;

	if (is_first_of_msg) {
		tegra_spi_clear_status(tspi);

		command1 = tspi->def_command1_reg;
		command1 |= SPI_BIT_LENGTH(bits_per_word - 1);

		command1 &= ~SPI_CONTROL_MODE_MASK;
		req_mode = spi->mode & 0x3;
		if (req_mode == SPI_MODE_0)
			command1 |= SPI_CONTROL_MODE_0;
		else if (req_mode == SPI_MODE_1)
			command1 |= SPI_CONTROL_MODE_1;
		else if (req_mode == SPI_MODE_2)
			command1 |= SPI_CONTROL_MODE_2;
		else if (req_mode == SPI_MODE_3)
			command1 |= SPI_CONTROL_MODE_3;

		if (spi->mode & SPI_LSB_FIRST)
			command1 |= SPI_LSBIT_FE;
		else
			command1 &= ~SPI_LSBIT_FE;

		if (spi->mode & SPI_3WIRE)
			command1 |= SPI_BIDIROE;
		else
			command1 &= ~SPI_BIDIROE;

		if (tspi->cs_control) {
			if (tspi->cs_control != spi)
				tegra_spi_writel(tspi, command1, SPI_COMMAND1);
			tspi->cs_control = NULL;
		} else
			tegra_spi_writel(tspi, command1, SPI_COMMAND1);

		/* GPIO based chip select control */
		if (spi->cs_gpiod)
			gpiod_set_value(spi->cs_gpiod, 1);

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (is_single_xfer && !(t->cs_change)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) tspi->use_hw_based_cs = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) command1 &= ~(SPI_CS_SW_HW | SPI_CS_SW_VAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) tspi->use_hw_based_cs = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) command1 |= SPI_CS_SW_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (spi->mode & SPI_CS_HIGH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) command1 |= SPI_CS_SW_VAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) command1 &= ~SPI_CS_SW_VAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (tspi->last_used_cs != spi->chip_select) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (cdata && cdata->tx_clk_tap_delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) tx_tap = cdata->tx_clk_tap_delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (cdata && cdata->rx_clk_tap_delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) rx_tap = cdata->rx_clk_tap_delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) command2 = SPI_TX_TAP_DELAY(tx_tap) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) SPI_RX_TAP_DELAY(rx_tap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (command2 != tspi->def_command2_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) tegra_spi_writel(tspi, command2, SPI_COMMAND2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) tspi->last_used_cs = spi->chip_select;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) command1 = tspi->command1_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) command1 &= ~SPI_BIT_LENGTH(~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) command1 |= SPI_BIT_LENGTH(bits_per_word - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) return command1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
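/*
 * Complete COMMAND1 for this transfer (dual-line mode, packed mode, RX/TX
 * enables and chip-select number), write it, and start either a DMA based or
 * a CPU (PIO) based transfer depending on whether the data fits in the FIFO.
 */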
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) static int tegra_spi_start_transfer_one(struct spi_device *spi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) struct spi_transfer *t, u32 command1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) unsigned int total_fifo_words;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) total_fifo_words = tegra_spi_calculate_curr_xfer_param(spi, tspi, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (t->rx_nbits == SPI_NBITS_DUAL || t->tx_nbits == SPI_NBITS_DUAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) command1 |= SPI_BOTH_EN_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) command1 &= ~SPI_BOTH_EN_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (tspi->is_packed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) command1 |= SPI_PACKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) command1 &= ~SPI_PACKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) command1 &= ~(SPI_CS_SEL_MASK | SPI_TX_EN | SPI_RX_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) tspi->cur_direction = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (t->rx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) command1 |= SPI_RX_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) tspi->cur_direction |= DATA_DIR_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (t->tx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) command1 |= SPI_TX_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) tspi->cur_direction |= DATA_DIR_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) command1 |= SPI_CS_SEL(spi->chip_select);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) tegra_spi_writel(tspi, command1, SPI_COMMAND1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) tspi->command1_reg = command1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) dev_dbg(tspi->dev, "def command1 0x%x, written command1 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) tspi->def_command1_reg, (unsigned)command1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) ret = tegra_spi_flush_fifos(tspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (total_fifo_words > SPI_FIFO_DEPTH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) ret = tegra_spi_start_dma_based_transfer(tspi, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) ret = tegra_spi_start_cpu_based_transfer(tspi, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
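/*
 * Read the optional per-client tap-delay properties from the client's
 * device-tree node. A hypothetical client node (values are placeholders,
 * not recommendations) might look like:
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;
 *		nvidia,tx-clk-tap-delay = <0x10>;
 *		nvidia,rx-clk-tap-delay = <0x10>;
 *	};
 */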
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) static struct tegra_spi_client_data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) *tegra_spi_parse_cdata_dt(struct spi_device *spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) struct tegra_spi_client_data *cdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) struct device_node *slave_np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) slave_np = spi->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (!slave_np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) dev_dbg(&spi->dev, "device node not found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) cdata = kzalloc(sizeof(*cdata), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (!cdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) of_property_read_u32(slave_np, "nvidia,tx-clk-tap-delay",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) &cdata->tx_clk_tap_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) of_property_read_u32(slave_np, "nvidia,rx-clk-tap-delay",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) &cdata->rx_clk_tap_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) return cdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) static void tegra_spi_cleanup(struct spi_device *spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) struct tegra_spi_client_data *cdata = spi->controller_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) spi->controller_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (spi->dev.of_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) kfree(cdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
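/*
 * Per-device setup: attach the parsed client data, unmask the controller
 * interrupts on SoCs that have SPI_INTR_MASK, and program the inactive
 * chip-select polarity for this device into the default COMMAND1 value.
 */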
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) static int tegra_spi_setup(struct spi_device *spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) struct tegra_spi_client_data *cdata = spi->controller_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) spi->bits_per_word,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) spi->mode & SPI_CPOL ? "" : "~",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) spi->mode & SPI_CPHA ? "" : "~",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) spi->max_speed_hz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (!cdata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) cdata = tegra_spi_parse_cdata_dt(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) spi->controller_data = cdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) ret = pm_runtime_get_sync(tspi->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) pm_runtime_put_noidle(tspi->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) dev_err(tspi->dev, "pm runtime get failed, err = %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (cdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) tegra_spi_cleanup(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (tspi->soc_data->has_intr_mask_reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) val = tegra_spi_readl(tspi, SPI_INTR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) val &= ~SPI_INTR_ALL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) tegra_spi_writel(tspi, val, SPI_INTR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) spin_lock_irqsave(&tspi->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) /* GPIO based chip select control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (spi->cs_gpiod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) gpiod_set_value(spi->cs_gpiod, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) val = tspi->def_command1_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (spi->mode & SPI_CS_HIGH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) val &= ~SPI_CS_POL_INACTIVE(spi->chip_select);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) val |= SPI_CS_POL_INACTIVE(spi->chip_select);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) tspi->def_command1_reg = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) spin_unlock_irqrestore(&tspi->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) pm_runtime_put(tspi->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
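/*
 * Deassert the chip select at the end of a message: drop a GPIO based CS,
 * update the software CS value when hardware CS is not in use, and restore
 * the default COMMAND1 register.
 */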
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) static void tegra_spi_transfer_end(struct spi_device *spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) /* GPIO based chip select control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (spi->cs_gpiod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) gpiod_set_value(spi->cs_gpiod, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (!tspi->use_hw_based_cs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (cs_val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) tspi->command1_reg |= SPI_CS_SW_VAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) tspi->command1_reg &= ~SPI_CS_SW_VAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) static void tegra_spi_dump_regs(struct tegra_spi_data *tspi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) dev_dbg(tspi->dev, "============ SPI REGISTER DUMP ============\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) dev_dbg(tspi->dev, "Command1: 0x%08x | Command2: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) tegra_spi_readl(tspi, SPI_COMMAND1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) tegra_spi_readl(tspi, SPI_COMMAND2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) dev_dbg(tspi->dev, "DMA_CTL: 0x%08x | DMA_BLK: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) tegra_spi_readl(tspi, SPI_DMA_CTL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) tegra_spi_readl(tspi, SPI_DMA_BLK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) dev_dbg(tspi->dev, "TRANS_STAT: 0x%08x | FIFO_STATUS: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) tegra_spi_readl(tspi, SPI_TRANS_STATUS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) tegra_spi_readl(tspi, SPI_FIFO_STATUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
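/*
 * Run every transfer in the message, wait for the interrupt thread to signal
 * completion (resetting the controller on timeout), and deassert the chip
 * select according to cs_change and end-of-message rules before finalizing
 * the message.
 */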
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) static int tegra_spi_transfer_one_message(struct spi_master *master,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) struct spi_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) bool is_first_msg = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) struct tegra_spi_data *tspi = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) struct spi_transfer *xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) struct spi_device *spi = msg->spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) bool skip = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) int single_xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) msg->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) msg->actual_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) single_xfer = list_is_singular(&msg->transfers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) list_for_each_entry(xfer, &msg->transfers, transfer_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) u32 cmd1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) reinit_completion(&tspi->xfer_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) cmd1 = tegra_spi_setup_transfer_one(spi, xfer, is_first_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) single_xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if (!xfer->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) skip = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) goto complete_xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) ret = tegra_spi_start_transfer_one(spi, xfer, cmd1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) dev_err(tspi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) "cannot start SPI transfer, err %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) goto complete_xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) is_first_msg = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) ret = wait_for_completion_timeout(&tspi->xfer_completion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) SPI_DMA_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (WARN_ON(ret == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) dev_err(tspi->dev, "spi transfer timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (tspi->is_curr_dma_xfer &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) (tspi->cur_direction & DATA_DIR_TX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) dmaengine_terminate_all(tspi->tx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (tspi->is_curr_dma_xfer &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) (tspi->cur_direction & DATA_DIR_RX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) dmaengine_terminate_all(tspi->rx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) tegra_spi_dump_regs(tspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) tegra_spi_flush_fifos(tspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) reset_control_assert(tspi->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) udelay(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) reset_control_deassert(tspi->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) tspi->last_used_cs = master->num_chipselect + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) goto complete_xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (tspi->tx_status || tspi->rx_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) dev_err(tspi->dev, "Error in transfer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) tegra_spi_dump_regs(tspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) goto complete_xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) msg->actual_length += xfer->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) complete_xfer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (ret < 0 || skip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) tegra_spi_transfer_end(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) spi_transfer_delay_exec(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) } else if (list_is_last(&xfer->transfer_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) &msg->transfers)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if (xfer->cs_change)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) tspi->cs_control = spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) tegra_spi_transfer_end(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) spi_transfer_delay_exec(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) } else if (xfer->cs_change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) tegra_spi_transfer_end(spi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) spi_transfer_delay_exec(xfer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) msg->status = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) spi_finalize_current_message(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
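/*
 * Interrupt-thread handler for PIO transfers: on FIFO errors dump and flush
 * state and reset the controller; otherwise drain the RX FIFO and either
 * complete the transfer or start the next chunk.
 */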
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) static irqreturn_t handle_cpu_based_xfer(struct tegra_spi_data *tspi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) struct spi_transfer *t = tspi->curr_xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) spin_lock_irqsave(&tspi->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) if (tspi->tx_status || tspi->rx_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) dev_err(tspi->dev, "CpuXfer ERROR bit set 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) tspi->status_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) dev_err(tspi->dev, "CpuXfer 0x%08x:0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) tspi->command1_reg, tspi->dma_control_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) tegra_spi_dump_regs(tspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) tegra_spi_flush_fifos(tspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) complete(&tspi->xfer_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) spin_unlock_irqrestore(&tspi->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) reset_control_assert(tspi->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) udelay(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) reset_control_deassert(tspi->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (tspi->cur_direction & DATA_DIR_RX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) tegra_spi_read_rx_fifo_to_client_rxbuf(tspi, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (tspi->cur_direction & DATA_DIR_TX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) tspi->cur_pos = tspi->cur_tx_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) tspi->cur_pos = tspi->cur_rx_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (tspi->cur_pos == t->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) complete(&tspi->xfer_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) tegra_spi_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) tegra_spi_start_cpu_based_transfer(tspi, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) spin_unlock_irqrestore(&tspi->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
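/*
 * Interrupt-thread handler for DMA transfers: wait for the DMA completion
 * callbacks (terminating the channels on error or timeout), copy received
 * data back to the client buffer, and either complete the transfer or start
 * the next chunk, falling back to PIO when the remainder fits in the FIFO.
 */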
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) static irqreturn_t handle_dma_based_xfer(struct tegra_spi_data *tspi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) struct spi_transfer *t = tspi->curr_xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) long wait_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) unsigned int total_fifo_words;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) /* Abort DMAs if any error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (tspi->cur_direction & DATA_DIR_TX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (tspi->tx_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) dmaengine_terminate_all(tspi->tx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) err += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) wait_status = wait_for_completion_interruptible_timeout(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) &tspi->tx_dma_complete, SPI_DMA_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (wait_status <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) dmaengine_terminate_all(tspi->tx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) dev_err(tspi->dev, "TxDma Xfer failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) err += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (tspi->cur_direction & DATA_DIR_RX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (tspi->rx_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) dmaengine_terminate_all(tspi->rx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) err += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) wait_status = wait_for_completion_interruptible_timeout(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) &tspi->rx_dma_complete, SPI_DMA_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (wait_status <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) dmaengine_terminate_all(tspi->rx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) dev_err(tspi->dev, "RxDma Xfer failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) err += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) spin_lock_irqsave(&tspi->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) dev_err(tspi->dev, "DmaXfer: ERROR bit set 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) tspi->status_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) dev_err(tspi->dev, "DmaXfer 0x%08x:0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) tspi->command1_reg, tspi->dma_control_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) tegra_spi_dump_regs(tspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) tegra_spi_flush_fifos(tspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) complete(&tspi->xfer_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) spin_unlock_irqrestore(&tspi->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) reset_control_assert(tspi->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) udelay(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) reset_control_deassert(tspi->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (tspi->cur_direction & DATA_DIR_RX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) tegra_spi_copy_spi_rxbuf_to_client_rxbuf(tspi, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (tspi->cur_direction & DATA_DIR_TX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) tspi->cur_pos = tspi->cur_tx_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) tspi->cur_pos = tspi->cur_rx_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) if (tspi->cur_pos == t->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) complete(&tspi->xfer_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) /* Continue transfer in current message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) total_fifo_words = tegra_spi_calculate_curr_xfer_param(tspi->cur_spi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) tspi, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) if (total_fifo_words > SPI_FIFO_DEPTH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) err = tegra_spi_start_dma_based_transfer(tspi, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) err = tegra_spi_start_cpu_based_transfer(tspi, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) spin_unlock_irqrestore(&tspi->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) static irqreturn_t tegra_spi_isr_thread(int irq, void *context_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) struct tegra_spi_data *tspi = context_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (!tspi->is_curr_dma_xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) return handle_cpu_based_xfer(tspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) return handle_dma_based_xfer(tspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
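/*
 * Hard IRQ handler: latch the FIFO status and any error bits, clear the
 * hardware status, and defer the actual data handling to the threaded
 * handler above.
 */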
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) static irqreturn_t tegra_spi_isr(int irq, void *context_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) struct tegra_spi_data *tspi = context_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) tspi->status_reg = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (tspi->cur_direction & DATA_DIR_TX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) tspi->tx_status = tspi->status_reg &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) (SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) if (tspi->cur_direction & DATA_DIR_RX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) tspi->rx_status = tspi->status_reg &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) (SPI_RX_FIFO_OVF | SPI_RX_FIFO_UNF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) tegra_spi_clear_status(tspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) return IRQ_WAKE_THREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
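/*
 * Per-SoC data: has_intr_mask_reg selects whether the SPI_INTR_MASK register
 * is available (Tegra210) and therefore used for interrupt masking in this
 * driver.
 */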
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) static struct tegra_spi_soc_data tegra114_spi_soc_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) .has_intr_mask_reg = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) static struct tegra_spi_soc_data tegra124_spi_soc_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) .has_intr_mask_reg = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) static struct tegra_spi_soc_data tegra210_spi_soc_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) .has_intr_mask_reg = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) static const struct of_device_id tegra_spi_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) .compatible = "nvidia,tegra114-spi",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) .data = &tegra114_spi_soc_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) .compatible = "nvidia,tegra124-spi",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) .data = &tegra124_spi_soc_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) .compatible = "nvidia,tegra210-spi",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) .data = &tegra210_spi_soc_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) MODULE_DEVICE_TABLE(of, tegra_spi_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
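/*
 * Probe: allocate the SPI master, map the controller registers, obtain the
 * "spi" clock and reset, set up the DMA channels and the threaded IRQ, and
 * register the controller. A hypothetical controller node (addresses, IRQ
 * and phandles are placeholders) could look like:
 *
 *	spi@7000d400 {
 *		compatible = "nvidia,tegra114-spi";
 *		reg = <0x7000d400 0x200>;
 *		interrupts = <...>;
 *		clocks = <&clks ...>;
 *		clock-names = "spi";
 *		resets = <&rsts ...>;
 *		reset-names = "spi";
 *		spi-max-frequency = <25000000>;
 *	};
 */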
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) static int tegra_spi_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) struct spi_master *master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) struct tegra_spi_data *tspi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) struct resource *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) int ret, spi_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) int bus_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (!master) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) dev_err(&pdev->dev, "master allocation failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) platform_set_drvdata(pdev, master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) tspi = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) &master->max_speed_hz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) master->max_speed_hz = 25000000; /* 25 MHz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) /* the spi->mode bits understood by this driver: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) master->use_gpio_descriptors = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) SPI_TX_DUAL | SPI_RX_DUAL | SPI_3WIRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) master->setup = tegra_spi_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) master->cleanup = tegra_spi_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) master->transfer_one_message = tegra_spi_transfer_one_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) master->set_cs_timing = tegra_spi_set_hw_cs_timing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) master->num_chipselect = MAX_CHIP_SELECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) master->auto_runtime_pm = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) if (bus_num >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) master->bus_num = bus_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) tspi->master = master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) tspi->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) spin_lock_init(&tspi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) tspi->soc_data = of_device_get_match_data(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) if (!tspi->soc_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) dev_err(&pdev->dev, "unsupported Tegra SoC\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) goto exit_free_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) tspi->base = devm_ioremap_resource(&pdev->dev, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) if (IS_ERR(tspi->base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) ret = PTR_ERR(tspi->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) goto exit_free_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) tspi->phys = r->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) spi_irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if (spi_irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) ret = spi_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) goto exit_free_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) tspi->irq = spi_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) tspi->clk = devm_clk_get(&pdev->dev, "spi");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) if (IS_ERR(tspi->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) dev_err(&pdev->dev, "cannot get clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) ret = PTR_ERR(tspi->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) goto exit_free_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) if (IS_ERR(tspi->rst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) dev_err(&pdev->dev, "cannot get reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) ret = PTR_ERR(tspi->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) goto exit_free_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) tspi->max_buf_size = SPI_FIFO_DEPTH << 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) ret = tegra_spi_init_dma_param(tspi, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) goto exit_free_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) ret = tegra_spi_init_dma_param(tspi, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) goto exit_rx_dma_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) tspi->max_buf_size = tspi->dma_buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) init_completion(&tspi->tx_dma_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) init_completion(&tspi->rx_dma_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) init_completion(&tspi->xfer_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) pm_runtime_enable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) if (!pm_runtime_enabled(&pdev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) ret = tegra_spi_runtime_resume(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) goto exit_pm_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) ret = pm_runtime_get_sync(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) dev_err(&pdev->dev, "pm runtime get failed, err = %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) pm_runtime_put_noidle(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) goto exit_pm_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) reset_control_assert(tspi->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) udelay(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) reset_control_deassert(tspi->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) tspi->def_command1_reg = SPI_M_S;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) tspi->spi_cs_timing1 = tegra_spi_readl(tspi, SPI_CS_TIMING1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) tspi->spi_cs_timing2 = tegra_spi_readl(tspi, SPI_CS_TIMING2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) tspi->def_command2_reg = tegra_spi_readl(tspi, SPI_COMMAND2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) tspi->last_used_cs = master->num_chipselect + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) pm_runtime_put(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) tegra_spi_isr_thread, IRQF_ONESHOT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) dev_name(&pdev->dev), tspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) tspi->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) goto exit_pm_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) master->dev.of_node = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) ret = devm_spi_register_master(&pdev->dev, master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) dev_err(&pdev->dev, "cannot register SPI master, err %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) goto exit_free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) exit_free_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) free_irq(spi_irq, tspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) exit_pm_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (!pm_runtime_status_suspended(&pdev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) tegra_spi_runtime_suspend(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) tegra_spi_deinit_dma_param(tspi, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) exit_rx_dma_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) tegra_spi_deinit_dma_param(tspi, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) exit_free_master:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) spi_master_put(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
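/*
 * Tear down in reverse order of probe: free the IRQ, release the DMA
 * channels, and make sure the clock gets disabled if runtime PM had left
 * the device active.
 */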
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) static int tegra_spi_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) struct spi_master *master = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) struct tegra_spi_data *tspi = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) free_irq(tspi->irq, tspi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) if (tspi->tx_dma_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) tegra_spi_deinit_dma_param(tspi, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (tspi->rx_dma_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) tegra_spi_deinit_dma_param(tspi, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (!pm_runtime_status_suspended(&pdev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) tegra_spi_runtime_suspend(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
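/*
 * System sleep: suspend simply quiesces the SPI core; resume rewrites the
 * cached COMMAND1/COMMAND2 values and invalidates last_used_cs before
 * resuming the core, so the next transfer reprograms the chip-select
 * specific state.
 */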
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) static int tegra_spi_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) struct spi_master *master = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) return spi_master_suspend(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) static int tegra_spi_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) struct spi_master *master = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) struct tegra_spi_data *tspi = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) ret = pm_runtime_get_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) pm_runtime_put_noidle(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) dev_err(dev, "pm runtime get failed, err = %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) tegra_spi_writel(tspi, tspi->def_command2_reg, SPI_COMMAND2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) tspi->last_used_cs = master->num_chipselect + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) pm_runtime_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) return spi_master_resume(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) static int tegra_spi_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) struct spi_master *master = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) struct tegra_spi_data *tspi = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) /* Flush all writes which are in the PPSB queue by reading back */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) tegra_spi_readl(tspi, SPI_COMMAND1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) clk_disable_unprepare(tspi->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) static int tegra_spi_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) struct spi_master *master = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) struct tegra_spi_data *tspi = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) ret = clk_prepare_enable(tspi->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) dev_err(tspi->dev, "clk_prepare_enable failed: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) static const struct dev_pm_ops tegra_spi_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) SET_RUNTIME_PM_OPS(tegra_spi_runtime_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) tegra_spi_runtime_resume, NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) SET_SYSTEM_SLEEP_PM_OPS(tegra_spi_suspend, tegra_spi_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) static struct platform_driver tegra_spi_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) .name = "spi-tegra114",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) .pm = &tegra_spi_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) .of_match_table = tegra_spi_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) .probe = tegra_spi_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) .remove = tegra_spi_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) module_platform_driver(tegra_spi_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) MODULE_ALIAS("platform:spi-tegra114");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) MODULE_DESCRIPTION("NVIDIA Tegra114 SPI Controller Driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) MODULE_LICENSE("GPL v2");