// SPDX-License-Identifier: GPL-2.0-only
/*
 * IMG SPFI controller driver
 *
 * Copyright (C) 2007,2008,2013 Imagination Technologies Ltd.
 * Copyright (C) 2014 Google, Inc.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>

#define SPFI_DEVICE_PARAMETER(x)		(0x00 + 0x4 * (x))
#define SPFI_DEVICE_PARAMETER_BITCLK_SHIFT	24
#define SPFI_DEVICE_PARAMETER_BITCLK_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSSETUP_SHIFT	16
#define SPFI_DEVICE_PARAMETER_CSSETUP_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSHOLD_SHIFT	8
#define SPFI_DEVICE_PARAMETER_CSHOLD_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSDELAY_SHIFT	0
#define SPFI_DEVICE_PARAMETER_CSDELAY_MASK	0xff

#define SPFI_CONTROL				0x14
#define SPFI_CONTROL_CONTINUE			BIT(12)
#define SPFI_CONTROL_SOFT_RESET			BIT(11)
#define SPFI_CONTROL_SEND_DMA			BIT(10)
#define SPFI_CONTROL_GET_DMA			BIT(9)
#define SPFI_CONTROL_SE				BIT(8)
#define SPFI_CONTROL_TMODE_SHIFT		5
#define SPFI_CONTROL_TMODE_MASK			0x7
#define SPFI_CONTROL_TMODE_SINGLE		0
#define SPFI_CONTROL_TMODE_DUAL			1
#define SPFI_CONTROL_TMODE_QUAD			2
#define SPFI_CONTROL_SPFI_EN			BIT(0)

#define SPFI_TRANSACTION			0x18
#define SPFI_TRANSACTION_TSIZE_SHIFT		16
#define SPFI_TRANSACTION_TSIZE_MASK		0xffff

#define SPFI_PORT_STATE				0x1c
#define SPFI_PORT_STATE_DEV_SEL_SHIFT		20
#define SPFI_PORT_STATE_DEV_SEL_MASK		0x7
#define SPFI_PORT_STATE_CK_POL(x)		BIT(19 - (x))
#define SPFI_PORT_STATE_CK_PHASE(x)		BIT(14 - (x))

#define SPFI_TX_32BIT_VALID_DATA		0x20
#define SPFI_TX_8BIT_VALID_DATA			0x24
#define SPFI_RX_32BIT_VALID_DATA		0x28
#define SPFI_RX_8BIT_VALID_DATA			0x2c

#define SPFI_INTERRUPT_STATUS			0x30
#define SPFI_INTERRUPT_ENABLE			0x34
#define SPFI_INTERRUPT_CLEAR			0x38
#define SPFI_INTERRUPT_IACCESS			BIT(12)
#define SPFI_INTERRUPT_GDEX8BIT			BIT(11)
#define SPFI_INTERRUPT_ALLDONETRIG		BIT(9)
#define SPFI_INTERRUPT_GDFUL			BIT(8)
#define SPFI_INTERRUPT_GDHF			BIT(7)
#define SPFI_INTERRUPT_GDEX32BIT		BIT(6)
#define SPFI_INTERRUPT_GDTRIG			BIT(5)
#define SPFI_INTERRUPT_SDFUL			BIT(3)
#define SPFI_INTERRUPT_SDHF			BIT(2)
#define SPFI_INTERRUPT_SDE			BIT(1)
#define SPFI_INTERRUPT_SDTRIG			BIT(0)

/*
 * There are four parallel FIFOs of 16 bytes each. The word buffer
 * (*_32BIT_VALID_DATA) accesses all four FIFOs at once, resulting in an
 * effective FIFO size of 64 bytes. The byte buffer (*_8BIT_VALID_DATA)
 * accesses only a single FIFO, resulting in an effective FIFO size of
 * 16 bytes.
 */
#define SPFI_32BIT_FIFO_SIZE			64
#define SPFI_8BIT_FIFO_SIZE			16

struct img_spfi {
	struct device *dev;
	struct spi_master *master;
	spinlock_t lock;

	void __iomem *regs;
	phys_addr_t phys;
	int irq;
	struct clk *spfi_clk;
	struct clk *sys_clk;

	struct dma_chan *rx_ch;
	struct dma_chan *tx_ch;
	bool tx_dma_busy;
	bool rx_dma_busy;
};

static inline u32 spfi_readl(struct img_spfi *spfi, u32 reg)
{
	return readl(spfi->regs + reg);
}

static inline void spfi_writel(struct img_spfi *spfi, u32 val, u32 reg)
{
	writel(val, spfi->regs + reg);
}

static inline void spfi_start(struct img_spfi *spfi)
{
	u32 val;

	val = spfi_readl(spfi, SPFI_CONTROL);
	val |= SPFI_CONTROL_SPFI_EN;
	spfi_writel(spfi, val, SPFI_CONTROL);
}

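/* Pulse the soft-reset bit, then leave the controller disabled. */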
static inline void spfi_reset(struct img_spfi *spfi)
{
	spfi_writel(spfi, SPFI_CONTROL_SOFT_RESET, SPFI_CONTROL);
	spfi_writel(spfi, 0, SPFI_CONTROL);
}

static int spfi_wait_all_done(struct img_spfi *spfi)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(50);

	while (time_before(jiffies, timeout)) {
		u32 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);

		if (status & SPFI_INTERRUPT_ALLDONETRIG) {
			spfi_writel(spfi, SPFI_INTERRUPT_ALLDONETRIG,
				    SPFI_INTERRUPT_CLEAR);
			return 0;
		}
		cpu_relax();
	}

	dev_err(spfi->dev, "Timed out waiting for transaction to complete\n");
	spfi_reset(spfi);

	return -ETIMEDOUT;
}

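/*
 * Fill the TX FIFO one 32-bit word at a time, backing off as soon as the
 * "send data full" (SDFUL) status asserts. The flag is cleared before each
 * check so that a stale value is never acted on. Returns the number of
 * bytes written.
 */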
static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf,
				     unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max / 4) {
		spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (status & SPFI_INTERRUPT_SDFUL)
			break;
		spfi_writel(spfi, buf[count], SPFI_TX_32BIT_VALID_DATA);
		count++;
	}

	return count * 4;
}

static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf,
				    unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max) {
		spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (status & SPFI_INTERRUPT_SDFUL)
			break;
		spfi_writel(spfi, buf[count], SPFI_TX_8BIT_VALID_DATA);
		count++;
	}

	return count;
}

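/*
 * Drain the RX FIFO while the "32-bit data exists" (GDEX32BIT) status shows
 * at least one full word is available. Returns the number of bytes read.
 */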
static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf,
				    unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max / 4) {
		spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT,
			    SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (!(status & SPFI_INTERRUPT_GDEX32BIT))
			break;
		buf[count] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
		count++;
	}

	return count * 4;
}

static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf,
				   unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max) {
		spfi_writel(spfi, SPFI_INTERRUPT_GDEX8BIT,
			    SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (!(status & SPFI_INTERRUPT_GDEX8BIT))
			break;
		buf[count] = spfi_readl(spfi, SPFI_RX_8BIT_VALID_DATA);
		count++;
	}

	return count;
}

static int img_spfi_start_pio(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	unsigned int tx_bytes = 0, rx_bytes = 0;
	const void *tx_buf = xfer->tx_buf;
	void *rx_buf = xfer->rx_buf;
	unsigned long timeout;
	int ret;

	if (tx_buf)
		tx_bytes = xfer->len;
	if (rx_buf)
		rx_bytes = xfer->len;

	spfi_start(spfi);

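	/*
	 * Allow enough time to clock out xfer->len bytes (8 bits each) at
	 * the requested rate, in milliseconds, plus a 100 ms margin.
	 */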
	timeout = jiffies +
		msecs_to_jiffies(xfer->len * 8 * 1000 / xfer->speed_hz + 100);
	while ((tx_bytes > 0 || rx_bytes > 0) &&
	       time_before(jiffies, timeout)) {
		unsigned int tx_count, rx_count;

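		/*
		 * Use word-wide FIFO accesses while at least four bytes
		 * remain, then fall back to byte accesses for the tail.
		 */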
		if (tx_bytes >= 4)
			tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes);
		else
			tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes);

		if (rx_bytes >= 4)
			rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
		else
			rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes);

		tx_buf += tx_count;
		rx_buf += rx_count;
		tx_bytes -= tx_count;
		rx_bytes -= rx_count;

		cpu_relax();
	}

	if (rx_bytes > 0 || tx_bytes > 0) {
		dev_err(spfi->dev, "PIO transfer timed out\n");
		return -ETIMEDOUT;
	}

	ret = spfi_wait_all_done(spfi);
	if (ret < 0)
		return ret;

	return 0;
}

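/*
 * The RX and TX DMA completion callbacks each clear their own busy flag under
 * the lock and finalize the transfer only once the other direction has also
 * finished, so a full-duplex transfer is completed exactly once.
 */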
static void img_spfi_dma_rx_cb(void *data)
{
	struct img_spfi *spfi = data;
	unsigned long flags;

	spfi_wait_all_done(spfi);

	spin_lock_irqsave(&spfi->lock, flags);
	spfi->rx_dma_busy = false;
	if (!spfi->tx_dma_busy)
		spi_finalize_current_transfer(spfi->master);
	spin_unlock_irqrestore(&spfi->lock, flags);
}

static void img_spfi_dma_tx_cb(void *data)
{
	struct img_spfi *spfi = data;
	unsigned long flags;

	spfi_wait_all_done(spfi);

	spin_lock_irqsave(&spfi->lock, flags);
	spfi->tx_dma_busy = false;
	if (!spfi->rx_dma_busy)
		spi_finalize_current_transfer(spfi->master);
	spin_unlock_irqrestore(&spfi->lock, flags);
}

static int img_spfi_start_dma(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
	struct dma_slave_config rxconf, txconf;

	spfi->rx_dma_busy = false;
	spfi->tx_dma_busy = false;

	if (xfer->rx_buf) {
		rxconf.direction = DMA_DEV_TO_MEM;
		if (xfer->len % 4 == 0) {
			rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA;
			rxconf.src_addr_width = 4;
			rxconf.src_maxburst = 4;
		} else {
			rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
			rxconf.src_addr_width = 1;
			rxconf.src_maxburst = 4;
		}
		dmaengine_slave_config(spfi->rx_ch, &rxconf);

		rxdesc = dmaengine_prep_slave_sg(spfi->rx_ch, xfer->rx_sg.sgl,
						 xfer->rx_sg.nents,
						 DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT);
		if (!rxdesc)
			goto stop_dma;

		rxdesc->callback = img_spfi_dma_rx_cb;
		rxdesc->callback_param = spfi;
	}

	if (xfer->tx_buf) {
		txconf.direction = DMA_MEM_TO_DEV;
		if (xfer->len % 4 == 0) {
			txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA;
			txconf.dst_addr_width = 4;
			txconf.dst_maxburst = 4;
		} else {
			txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
			txconf.dst_addr_width = 1;
			txconf.dst_maxburst = 4;
		}
		dmaengine_slave_config(spfi->tx_ch, &txconf);

		txdesc = dmaengine_prep_slave_sg(spfi->tx_ch, xfer->tx_sg.sgl,
						 xfer->tx_sg.nents,
						 DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT);
		if (!txdesc)
			goto stop_dma;

		txdesc->callback = img_spfi_dma_tx_cb;
		txdesc->callback_param = spfi;
	}

	if (xfer->rx_buf) {
		spfi->rx_dma_busy = true;
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(spfi->rx_ch);
	}

	spfi_start(spfi);

	if (xfer->tx_buf) {
		spfi->tx_dma_busy = true;
		dmaengine_submit(txdesc);
		dma_async_issue_pending(spfi->tx_ch);
	}

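	/*
	 * A positive return value tells the SPI core that the transfer is
	 * still in flight; it is finalized from the DMA callbacks above.
	 */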
	return 1;

stop_dma:
	dmaengine_terminate_all(spfi->rx_ch);
	dmaengine_terminate_all(spfi->tx_ch);
	return -EIO;
}

static void img_spfi_handle_err(struct spi_master *master,
				struct spi_message *msg)
{
	struct img_spfi *spfi = spi_master_get_devdata(master);
	unsigned long flags;

	/*
	 * Stop all DMA and reset the controller if the previous transaction
	 * timed out and never completed its DMA.
	 */
	spin_lock_irqsave(&spfi->lock, flags);
	if (spfi->tx_dma_busy || spfi->rx_dma_busy) {
		spfi->tx_dma_busy = false;
		spfi->rx_dma_busy = false;

		dmaengine_terminate_all(spfi->tx_ch);
		dmaengine_terminate_all(spfi->rx_ch);
	}
	spin_unlock_irqrestore(&spfi->lock, flags);
}

static int img_spfi_prepare(struct spi_master *master, struct spi_message *msg)
{
	struct img_spfi *spfi = spi_master_get_devdata(master);
	u32 val;

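	/*
	 * Select the addressed device and program its clock polarity and
	 * phase; PORT_STATE keeps separate CPOL/CPHA bits per chip select.
	 */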
	val = spfi_readl(spfi, SPFI_PORT_STATE);
	val &= ~(SPFI_PORT_STATE_DEV_SEL_MASK <<
		 SPFI_PORT_STATE_DEV_SEL_SHIFT);
	val |= msg->spi->chip_select << SPFI_PORT_STATE_DEV_SEL_SHIFT;
	if (msg->spi->mode & SPI_CPHA)
		val |= SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select);
	else
		val &= ~SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select);
	if (msg->spi->mode & SPI_CPOL)
		val |= SPFI_PORT_STATE_CK_POL(msg->spi->chip_select);
	else
		val &= ~SPFI_PORT_STATE_CK_POL(msg->spi->chip_select);
	spfi_writel(spfi, val, SPFI_PORT_STATE);

	return 0;
}

static int img_spfi_unprepare(struct spi_master *master,
			      struct spi_message *msg)
{
	struct img_spfi *spfi = spi_master_get_devdata(master);

	spfi_reset(spfi);

	return 0;
}

static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	u32 val, div;

	/*
	 * output = spfi_clk * (BITCLK / 512), where BITCLK must be a
	 * power of 2 up to 128
	 */
	div = DIV_ROUND_UP(clk_get_rate(spfi->spfi_clk), xfer->speed_hz);
	div = clamp(512 / (1 << get_count_order(div)), 1, 128);
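	/*
	 * Worked example (illustrative rates): with a 50 MHz SPFI clock and
	 * a requested 10 MHz, the ratio rounds up to 5, get_count_order(5)
	 * is 3, so BITCLK = clamp(512 / 8, 1, 128) = 64 and the output
	 * clock is 50 MHz * 64 / 512 = 6.25 MHz, the fastest power-of-two
	 * setting that does not exceed the request.
	 */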

	val = spfi_readl(spfi, SPFI_DEVICE_PARAMETER(spi->chip_select));
	val &= ~(SPFI_DEVICE_PARAMETER_BITCLK_MASK <<
		 SPFI_DEVICE_PARAMETER_BITCLK_SHIFT);
	val |= div << SPFI_DEVICE_PARAMETER_BITCLK_SHIFT;
	spfi_writel(spfi, val, SPFI_DEVICE_PARAMETER(spi->chip_select));

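	/* Program the transaction length, in bytes, into the TSIZE field. */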
	spfi_writel(spfi, xfer->len << SPFI_TRANSACTION_TSIZE_SHIFT,
		    SPFI_TRANSACTION);

	val = spfi_readl(spfi, SPFI_CONTROL);
	val &= ~(SPFI_CONTROL_SEND_DMA | SPFI_CONTROL_GET_DMA);
	if (xfer->tx_buf)
		val |= SPFI_CONTROL_SEND_DMA;
	if (xfer->rx_buf)
		val |= SPFI_CONTROL_GET_DMA;
	val &= ~(SPFI_CONTROL_TMODE_MASK << SPFI_CONTROL_TMODE_SHIFT);
	if (xfer->tx_nbits == SPI_NBITS_DUAL &&
	    xfer->rx_nbits == SPI_NBITS_DUAL)
		val |= SPFI_CONTROL_TMODE_DUAL << SPFI_CONTROL_TMODE_SHIFT;
	else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
		 xfer->rx_nbits == SPI_NBITS_QUAD)
		val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
	val |= SPFI_CONTROL_SE;
	spfi_writel(spfi, val, SPFI_CONTROL);
}

static int img_spfi_transfer_one(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	int ret;

	if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) {
		dev_err(spfi->dev,
			"Transfer length (%d) is greater than the max supported (%d)",
			xfer->len, SPFI_TRANSACTION_TSIZE_MASK);
		return -EINVAL;
	}

	img_spfi_config(master, spi, xfer);
	if (master->can_dma && master->can_dma(master, spi, xfer))
		ret = img_spfi_start_dma(master, spi, xfer);
	else
		ret = img_spfi_start_pio(master, spi, xfer);

	return ret;
}

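/*
 * Transfers that fit entirely in the 64-byte word FIFO are handled by polled
 * PIO; only longer transfers are worth the DMA mapping and setup cost.
 */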
static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi,
			     struct spi_transfer *xfer)
{
	if (xfer->len > SPFI_32BIT_FIFO_SIZE)
		return true;
	return false;
}

static irqreturn_t img_spfi_irq(int irq, void *dev_id)
{
	struct img_spfi *spfi = (struct img_spfi *)dev_id;
	u32 status;

	status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
	if (status & SPFI_INTERRUPT_IACCESS) {
		spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_CLEAR);
		dev_err(spfi->dev, "Illegal access interrupt");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int img_spfi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct img_spfi *spfi;
	struct resource *res;
	int ret;
	u32 max_speed_hz;

	master = spi_alloc_master(&pdev->dev, sizeof(*spfi));
	if (!master)
		return -ENOMEM;
	platform_set_drvdata(pdev, master);

	spfi = spi_master_get_devdata(master);
	spfi->dev = &pdev->dev;
	spfi->master = master;
	spin_lock_init(&spfi->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	spfi->regs = devm_ioremap_resource(spfi->dev, res);
	if (IS_ERR(spfi->regs)) {
		ret = PTR_ERR(spfi->regs);
		goto put_spi;
	}
	spfi->phys = res->start;

	spfi->irq = platform_get_irq(pdev, 0);
	if (spfi->irq < 0) {
		ret = spfi->irq;
		goto put_spi;
	}
	ret = devm_request_irq(spfi->dev, spfi->irq, img_spfi_irq,
			       IRQ_TYPE_LEVEL_HIGH, dev_name(spfi->dev), spfi);
	if (ret)
		goto put_spi;

	spfi->sys_clk = devm_clk_get(spfi->dev, "sys");
	if (IS_ERR(spfi->sys_clk)) {
		ret = PTR_ERR(spfi->sys_clk);
		goto put_spi;
	}
	spfi->spfi_clk = devm_clk_get(spfi->dev, "spfi");
	if (IS_ERR(spfi->spfi_clk)) {
		ret = PTR_ERR(spfi->spfi_clk);
		goto put_spi;
	}

	ret = clk_prepare_enable(spfi->sys_clk);
	if (ret)
		goto put_spi;
	ret = clk_prepare_enable(spfi->spfi_clk);
	if (ret)
		goto disable_pclk;

	spfi_reset(spfi);
	/*
	 * Only enable the error (IACCESS) interrupt. In PIO mode we'll
	 * poll the status of the FIFOs.
	 */
	spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_ENABLE);

	master->auto_runtime_pm = true;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL;
	if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode"))
		master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
	master->dev.of_node = pdev->dev.of_node;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8);
	master->max_speed_hz = clk_get_rate(spfi->spfi_clk) / 4;
	master->min_speed_hz = clk_get_rate(spfi->spfi_clk) / 512;

	/*
	 * The maximum speed supported by the SPFI is the lower of 1/4 of
	 * the SPFI clock and the "spfi-max-frequency" value defined in the
	 * device tree. If no value is defined in the device tree, assume
	 * the maximum supported speed is 1/4 of the SPFI clock.
	 */
	if (!of_property_read_u32(spfi->dev->of_node, "spfi-max-frequency",
				  &max_speed_hz)) {
		if (master->max_speed_hz > max_speed_hz)
			master->max_speed_hz = max_speed_hz;
	}

	master->transfer_one = img_spfi_transfer_one;
	master->prepare_message = img_spfi_prepare;
	master->unprepare_message = img_spfi_unprepare;
	master->handle_err = img_spfi_handle_err;
	master->use_gpio_descriptors = true;

	spfi->tx_ch = dma_request_chan(spfi->dev, "tx");
	if (IS_ERR(spfi->tx_ch)) {
		ret = PTR_ERR(spfi->tx_ch);
		spfi->tx_ch = NULL;
		if (ret == -EPROBE_DEFER)
			goto disable_pm;
	}

	spfi->rx_ch = dma_request_chan(spfi->dev, "rx");
	if (IS_ERR(spfi->rx_ch)) {
		ret = PTR_ERR(spfi->rx_ch);
		spfi->rx_ch = NULL;
		if (ret == -EPROBE_DEFER)
			goto disable_pm;
	}

	if (!spfi->tx_ch || !spfi->rx_ch) {
		if (spfi->tx_ch)
			dma_release_channel(spfi->tx_ch);
		if (spfi->rx_ch)
			dma_release_channel(spfi->rx_ch);
		spfi->tx_ch = NULL;
		spfi->rx_ch = NULL;
		dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n");
	} else {
		master->dma_tx = spfi->tx_ch;
		master->dma_rx = spfi->rx_ch;
		master->can_dma = img_spfi_can_dma;
	}

	pm_runtime_set_active(spfi->dev);
	pm_runtime_enable(spfi->dev);

	ret = devm_spi_register_master(spfi->dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(spfi->dev);
	if (spfi->rx_ch)
		dma_release_channel(spfi->rx_ch);
	if (spfi->tx_ch)
		dma_release_channel(spfi->tx_ch);
	clk_disable_unprepare(spfi->spfi_clk);
disable_pclk:
	clk_disable_unprepare(spfi->sys_clk);
put_spi:
	spi_master_put(master);

	return ret;
}

static int img_spfi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct img_spfi *spfi = spi_master_get_devdata(master);

	if (spfi->tx_ch)
		dma_release_channel(spfi->tx_ch);
	if (spfi->rx_ch)
		dma_release_channel(spfi->rx_ch);

	pm_runtime_disable(spfi->dev);
	if (!pm_runtime_status_suspended(spfi->dev)) {
		clk_disable_unprepare(spfi->spfi_clk);
		clk_disable_unprepare(spfi->sys_clk);
	}

	return 0;
}

#ifdef CONFIG_PM
static int img_spfi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_master_get_devdata(master);

	clk_disable_unprepare(spfi->spfi_clk);
	clk_disable_unprepare(spfi->sys_clk);

	return 0;
}

static int img_spfi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(spfi->sys_clk);
	if (ret)
		return ret;
	ret = clk_prepare_enable(spfi->spfi_clk);
	if (ret) {
		clk_disable_unprepare(spfi->sys_clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
static int img_spfi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	return spi_master_suspend(master);
}

static int img_spfi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_master_get_devdata(master);
	int ret;

	/* pm_runtime_get_sync() returns 1 if the device was already active */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}
	spfi_reset(spfi);
	pm_runtime_put(dev);

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_spfi_pm_ops = {
	SET_RUNTIME_PM_OPS(img_spfi_runtime_suspend, img_spfi_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(img_spfi_suspend, img_spfi_resume)
};

static const struct of_device_id img_spfi_of_match[] = {
	{ .compatible = "img,spfi", },
	{ },
};
MODULE_DEVICE_TABLE(of, img_spfi_of_match);

static struct platform_driver img_spfi_driver = {
	.driver = {
		.name = "img-spfi",
		.pm = &img_spfi_pm_ops,
		.of_match_table = of_match_ptr(img_spfi_of_match),
	},
	.probe = img_spfi_probe,
	.remove = img_spfi_remove,
};
module_platform_driver(img_spfi_driver);

MODULE_DESCRIPTION("IMG SPFI controller driver");
MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
MODULE_LICENSE("GPL v2");