^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * PIC32 Quad SPI controller driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Purna Chandra Mandal <purna.mandal@microchip.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (c) 2016, Microchip Technology Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/iopoll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/spi/spi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) /* SQI registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #define PESQI_XIP_CONF1_REG 0x00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #define PESQI_XIP_CONF2_REG 0x04
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #define PESQI_CONF_REG 0x08
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #define PESQI_CTRL_REG 0x0C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #define PESQI_CLK_CTRL_REG 0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #define PESQI_CMD_THRES_REG 0x14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #define PESQI_INT_THRES_REG 0x18
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #define PESQI_INT_ENABLE_REG 0x1C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define PESQI_INT_STAT_REG 0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define PESQI_TX_DATA_REG 0x24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #define PESQI_RX_DATA_REG 0x28
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define PESQI_STAT1_REG 0x2C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #define PESQI_STAT2_REG 0x30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define PESQI_BD_CTRL_REG 0x34
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define PESQI_BD_CUR_ADDR_REG 0x38
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define PESQI_BD_BASE_ADDR_REG 0x40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define PESQI_BD_STAT_REG 0x44
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define PESQI_BD_POLL_CTRL_REG 0x48
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define PESQI_BD_TX_DMA_STAT_REG 0x4C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define PESQI_BD_RX_DMA_STAT_REG 0x50
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #define PESQI_THRES_REG 0x54
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define PESQI_INT_SIGEN_REG 0x58
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) /* PESQI_CONF_REG fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define PESQI_MODE 0x7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #define PESQI_MODE_BOOT 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define PESQI_MODE_PIO 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define PESQI_MODE_DMA 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define PESQI_MODE_XIP 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define PESQI_MODE_SHIFT 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define PESQI_CPHA BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define PESQI_CPOL BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #define PESQI_LSBF BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #define PESQI_RXLATCH BIT(7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define PESQI_SERMODE BIT(8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #define PESQI_WP_EN BIT(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #define PESQI_HOLD_EN BIT(10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #define PESQI_BURST_EN BIT(12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #define PESQI_CS_CTRL_HW BIT(15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #define PESQI_SOFT_RESET BIT(16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #define PESQI_LANES_SHIFT 20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #define PESQI_SINGLE_LANE 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #define PESQI_DUAL_LANE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #define PESQI_QUAD_LANE 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define PESQI_CSEN_SHIFT 24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #define PESQI_EN BIT(23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) /* PESQI_CLK_CTRL_REG fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #define PESQI_CLK_EN BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #define PESQI_CLK_STABLE BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) #define PESQI_CLKDIV_SHIFT 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) #define PESQI_CLKDIV 0xff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) /* PESQI_INT_THR/CMD_THR_REG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) #define PESQI_TXTHR_MASK 0x1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #define PESQI_TXTHR_SHIFT 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) #define PESQI_RXTHR_MASK 0x1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) #define PESQI_RXTHR_SHIFT 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) /* PESQI_INT_EN/INT_STAT/INT_SIG_EN_REG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #define PESQI_TXEMPTY BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) #define PESQI_TXFULL BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #define PESQI_TXTHR BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) #define PESQI_RXEMPTY BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) #define PESQI_RXFULL BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) #define PESQI_RXTHR BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) #define PESQI_BDDONE BIT(9) /* BD processing complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) #define PESQI_PKTCOMP BIT(10) /* packet processing complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) #define PESQI_DMAERR BIT(11) /* error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) /* PESQI_BD_CTRL_REG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) #define PESQI_DMA_EN BIT(0) /* enable DMA engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) #define PESQI_POLL_EN BIT(1) /* enable polling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) #define PESQI_BDP_START BIT(2) /* start BD processor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
/*
 * PESQI controller buffer descriptor.
 *
 * Hardware-defined layout: the BD processor walks these 16-byte records
 * directly over DMA, so field order and width must not change.
 */
struct buf_desc {
	u32 bd_ctrl;	/* control word, see BD_* flags above; low bits carry length */
	u32 bd_status;	/* reserved; written by hardware */
	u32 bd_addr;	/* DMA buffer addr */
	u32 bd_nextp;	/* next item in chain */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) /* bd_ctrl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) #define BD_BUFLEN 0x1ff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #define BD_CBD_INT_EN BIT(16) /* Current BD is processed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) #define BD_PKT_INT_EN BIT(17) /* All BDs of PKT processed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) #define BD_LIFM BIT(18) /* last data of pkt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) #define BD_LAST BIT(19) /* end of list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) #define BD_DATA_RECV BIT(20) /* receive data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) #define BD_DDR BIT(21) /* DDR mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) #define BD_DUAL BIT(22) /* Dual SPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) #define BD_QUAD BIT(23) /* Quad SPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) #define BD_LSBF BIT(25) /* LSB First */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) #define BD_STAT_CHECK BIT(27) /* Status poll */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) #define BD_DEVSEL_SHIFT 28 /* CS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) #define BD_CS_DEASSERT BIT(30) /* de-assert CS after current BD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) #define BD_EN BIT(31) /* BD owned by H/W */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) * struct ring_desc - Representation of SQI ring descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) * @list: list element to add to free or used list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) * @bd: PESQI controller buffer descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) * @bd_dma: DMA address of PESQI controller buffer descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) * @xfer_len: transfer length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) struct ring_desc {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) struct buf_desc *bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) dma_addr_t bd_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) u32 xfer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) /* Global constants */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) #define PESQI_BD_BUF_LEN_MAX 256
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) #define PESQI_BD_COUNT 256 /* max 64KB data per spi message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
/* Driver-private state; one instance per SQI controller. */
struct pic32_sqi {
	void __iomem *regs;		/* memory-mapped PESQI registers */
	struct clk *sys_clk;		/* register-interface clock */
	struct clk *base_clk; /* drives spi clock */
	struct spi_master *master;
	int irq;
	struct completion xfer_done;	/* signalled by ISR on PKTCOMP */
	struct ring_desc *ring;		/* software ring descriptors (PESQI_BD_COUNT) */
	void *bd;			/* coherent array of hardware buf_desc */
	dma_addr_t bd_dma;		/* DMA address of @bd */
	struct list_head bd_list_free; /* free */
	struct list_head bd_list_used; /* allocated */
	struct spi_device *cur_spi;	/* device configured on last message */
	u32 cur_speed;			/* cached clk rate to skip re-programming */
	u8 cur_mode;			/* cached SPI mode bits */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) static inline void pic32_setbits(void __iomem *reg, u32 set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) writel(readl(reg) | set, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) static inline void pic32_clrbits(void __iomem *reg, u32 clr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) writel(readl(reg) & ~clr, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) static int pic32_sqi_set_clk_rate(struct pic32_sqi *sqi, u32 sck)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) u32 val, div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) /* div = base_clk / (2 * spi_clk) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) div = clk_get_rate(sqi->base_clk) / (2 * sck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) div &= PESQI_CLKDIV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) val = readl(sqi->regs + PESQI_CLK_CTRL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) /* apply new divider */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) val &= ~(PESQI_CLK_STABLE | (PESQI_CLKDIV << PESQI_CLKDIV_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) val |= div << PESQI_CLKDIV_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) writel(val, sqi->regs + PESQI_CLK_CTRL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) /* wait for stability */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) return readl_poll_timeout(sqi->regs + PESQI_CLK_CTRL_REG, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) val & PESQI_CLK_STABLE, 1, 5000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) static inline void pic32_sqi_enable_int(struct pic32_sqi *sqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) u32 mask = PESQI_DMAERR | PESQI_BDDONE | PESQI_PKTCOMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) writel(mask, sqi->regs + PESQI_INT_ENABLE_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) /* INT_SIGEN works as interrupt-gate to INTR line */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) writel(mask, sqi->regs + PESQI_INT_SIGEN_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) static inline void pic32_sqi_disable_int(struct pic32_sqi *sqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) writel(0, sqi->regs + PESQI_INT_ENABLE_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) writel(0, sqi->regs + PESQI_INT_SIGEN_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)
/*
 * pic32_sqi_isr - SQI interrupt handler.
 *
 * Interrupt events are sticky in PESQI_INT_STAT_REG, so instead of
 * acknowledging them the handler masks each handled event out of the
 * locally cached enable word and writes it back on exit.
 */
static irqreturn_t pic32_sqi_isr(int irq, void *dev_id)
{
	struct pic32_sqi *sqi = dev_id;
	u32 enable, status;

	enable = readl(sqi->regs + PESQI_INT_ENABLE_REG);
	status = readl(sqi->regs + PESQI_INT_STAT_REG);

	/* check spurious interrupt */
	if (!status)
		return IRQ_NONE;

	/* DMA error: mask everything; the waiting thread will time out.
	 * NOTE(review): no completion is signalled here — presumably the
	 * 5s wait_for_completion_timeout in one_message is the intended
	 * recovery path; confirm.
	 */
	if (status & PESQI_DMAERR) {
		enable = 0;
		goto irq_done;
	}

	/* TX FIFO reached threshold: stop TX-side FIFO events */
	if (status & PESQI_TXTHR)
		enable &= ~(PESQI_TXTHR | PESQI_TXFULL | PESQI_TXEMPTY);

	/* RX FIFO reached threshold: stop RX-side FIFO events */
	if (status & PESQI_RXTHR)
		enable &= ~(PESQI_RXTHR | PESQI_RXFULL | PESQI_RXEMPTY);

	/* one buffer descriptor finished; quiesce until next transfer */
	if (status & PESQI_BDDONE)
		enable &= ~PESQI_BDDONE;

	/* packet processing completed */
	if (status & PESQI_PKTCOMP) {
		/* mask all interrupts */
		enable = 0;
		/* complete transaction */
		complete(&sqi->xfer_done);
	}

irq_done:
	/* interrupts are sticky, so mask when handled */
	writel(enable, sqi->regs + PESQI_INT_ENABLE_REG);

	return IRQ_HANDLED;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) static struct ring_desc *ring_desc_get(struct pic32_sqi *sqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) struct ring_desc *rdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) if (list_empty(&sqi->bd_list_free))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) rdesc = list_first_entry(&sqi->bd_list_free, struct ring_desc, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) list_move_tail(&rdesc->list, &sqi->bd_list_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) return rdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)
/* Return a ring descriptor to the free pool. */
static void ring_desc_put(struct pic32_sqi *sqi, struct ring_desc *rdesc)
{
	list_move(&rdesc->list, &sqi->bd_list_free);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) static int pic32_sqi_one_transfer(struct pic32_sqi *sqi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) struct spi_message *mesg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) struct spi_transfer *xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) struct spi_device *spi = mesg->spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) struct scatterlist *sg, *sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) struct ring_desc *rdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) struct buf_desc *bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) int nents, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) u32 bd_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) u32 nbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) /* Device selection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) bd_ctrl = spi->chip_select << BD_DEVSEL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) /* half-duplex: select transfer buffer, direction and lane */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) if (xfer->rx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) bd_ctrl |= BD_DATA_RECV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) nbits = xfer->rx_nbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) sgl = xfer->rx_sg.sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) nents = xfer->rx_sg.nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) nbits = xfer->tx_nbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) sgl = xfer->tx_sg.sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) nents = xfer->tx_sg.nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) if (nbits & SPI_NBITS_QUAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) bd_ctrl |= BD_QUAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) else if (nbits & SPI_NBITS_DUAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) bd_ctrl |= BD_DUAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) /* LSB first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) if (spi->mode & SPI_LSB_FIRST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) bd_ctrl |= BD_LSBF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) /* ownership to hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) bd_ctrl |= BD_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) for_each_sg(sgl, sg, nents, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) /* get ring descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) rdesc = ring_desc_get(sqi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) if (!rdesc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) bd = rdesc->bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) /* BD CTRL: length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) rdesc->xfer_len = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) bd->bd_ctrl = bd_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) bd->bd_ctrl |= rdesc->xfer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) /* BD STAT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) bd->bd_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) /* BD BUFFER ADDRESS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) bd->bd_addr = sg->dma_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) static int pic32_sqi_prepare_hardware(struct spi_master *master)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) struct pic32_sqi *sqi = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) /* enable spi interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) pic32_setbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) /* enable spi clk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) pic32_setbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)
/*
 * spi_master->can_dma callback: every transfer goes through the BD/DMA
 * engine regardless of size, so the core always DMA-maps the buffers.
 */
static bool pic32_sqi_can_dma(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *x)
{
	/* Do DMA irrespective of transfer size */
	return true;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
/*
 * pic32_sqi_one_message - transfer_one_message callback for the SPI core.
 *
 * Sequence: reconfigure clock/mode on chip-select switch, build the BD
 * chain for all transfers of @msg, mark the last BD, hand the chain to
 * the DMA engine, wait (up to 5s) for the ISR to signal completion, then
 * unwind the used descriptors and finalize the message.
 *
 * Returns 0 on success or a negative error (-ETIMEDOUT, or an error from
 * BD preparation).
 */
static int pic32_sqi_one_message(struct spi_master *master,
				 struct spi_message *msg)
{
	struct spi_device *spi = msg->spi;
	struct ring_desc *rdesc, *next;
	struct spi_transfer *xfer;
	struct pic32_sqi *sqi;
	int ret = 0, mode;
	unsigned long timeout;
	u32 val;

	sqi = spi_master_get_devdata(master);

	reinit_completion(&sqi->xfer_done);
	msg->actual_length = 0;

	/* We can't handle spi_transfer specific "speed_hz", "bits_per_word"
	 * and "delay_usecs". But spi_device specific speed and mode change
	 * can be handled at best during spi chip-select switch.
	 */
	if (sqi->cur_spi != spi) {
		/* set spi speed */
		if (sqi->cur_speed != spi->max_speed_hz) {
			sqi->cur_speed = spi->max_speed_hz;
			ret = pic32_sqi_set_clk_rate(sqi, spi->max_speed_hz);
			if (ret)
				dev_warn(&spi->dev, "set_clk, %d\n", ret);
		}

		/* set spi mode */
		mode = spi->mode & (SPI_MODE_3 | SPI_LSB_FIRST);
		if (sqi->cur_mode != mode) {
			val = readl(sqi->regs + PESQI_CONF_REG);
			val &= ~(PESQI_CPOL | PESQI_CPHA | PESQI_LSBF);
			if (mode & SPI_CPOL)
				val |= PESQI_CPOL;
			if (mode & SPI_LSB_FIRST)
				val |= PESQI_LSBF;
			/* NOTE(review): CPHA is set unconditionally, even when
			 * SPI_CPHA is clear in "mode" — presumably a controller
			 * requirement; confirm against the PIC32 SQI datasheet.
			 */
			val |= PESQI_CPHA;
			writel(val, sqi->regs + PESQI_CONF_REG);

			sqi->cur_mode = mode;
		}
		sqi->cur_spi = spi;
	}

	/* prepare hardware desc-list(BD) for transfer(s) */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		ret = pic32_sqi_one_transfer(sqi, msg, xfer);
		if (ret) {
			dev_err(&spi->dev, "xfer %p err\n", xfer);
			goto xfer_out;
		}
	}

	/* BDs are prepared and chained. Now mark LAST_BD, CS_DEASSERT at last
	 * element of the list.
	 */
	rdesc = list_last_entry(&sqi->bd_list_used, struct ring_desc, list);
	rdesc->bd->bd_ctrl |= BD_LAST | BD_CS_DEASSERT |
			      BD_LIFM | BD_PKT_INT_EN;

	/* set base address BD list for DMA engine */
	rdesc = list_first_entry(&sqi->bd_list_used, struct ring_desc, list);
	writel(rdesc->bd_dma, sqi->regs + PESQI_BD_BASE_ADDR_REG);

	/* enable interrupt */
	pic32_sqi_enable_int(sqi);

	/* enable DMA engine */
	val = PESQI_DMA_EN | PESQI_POLL_EN | PESQI_BDP_START;
	writel(val, sqi->regs + PESQI_BD_CTRL_REG);

	/* wait for xfer completion; ISR completes on PKTCOMP */
	timeout = wait_for_completion_timeout(&sqi->xfer_done, 5 * HZ);
	if (timeout == 0) {
		dev_err(&sqi->master->dev, "wait timedout/interrupted\n");
		ret = -ETIMEDOUT;
		msg->status = ret;
	} else {
		/* success */
		msg->status = 0;
		ret = 0;
	}

	/* disable DMA */
	writel(0, sqi->regs + PESQI_BD_CTRL_REG);

	pic32_sqi_disable_int(sqi);

xfer_out:
	/* walk used BDs in reverse: tally actual_length and refill free pool */
	list_for_each_entry_safe_reverse(rdesc, next,
					 &sqi->bd_list_used, list) {
		/* Update total byte transferred */
		msg->actual_length += rdesc->xfer_len;
		/* release ring descr */
		ring_desc_put(sqi, rdesc);
	}
	spi_finalize_current_message(spi->master);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) static int pic32_sqi_unprepare_hardware(struct spi_master *master)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) struct pic32_sqi *sqi = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) /* disable clk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) pic32_clrbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) /* disable spi */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) pic32_clrbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) static int ring_desc_ring_alloc(struct pic32_sqi *sqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) struct ring_desc *rdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) struct buf_desc *bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) /* allocate coherent DMAable memory for hardware buffer descriptors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) sqi->bd = dma_alloc_coherent(&sqi->master->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) sizeof(*bd) * PESQI_BD_COUNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) &sqi->bd_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) if (!sqi->bd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) dev_err(&sqi->master->dev, "failed allocating dma buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) /* allocate software ring descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) sqi->ring = kcalloc(PESQI_BD_COUNT, sizeof(*rdesc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) if (!sqi->ring) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) dma_free_coherent(&sqi->master->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) sizeof(*bd) * PESQI_BD_COUNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) sqi->bd, sqi->bd_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) bd = (struct buf_desc *)sqi->bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) INIT_LIST_HEAD(&sqi->bd_list_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) INIT_LIST_HEAD(&sqi->bd_list_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) /* initialize ring-desc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT; i++, rdesc++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) INIT_LIST_HEAD(&rdesc->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) rdesc->bd = &bd[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) rdesc->bd_dma = sqi->bd_dma + (void *)&bd[i] - (void *)bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) list_add_tail(&rdesc->list, &sqi->bd_list_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) /* Prepare BD: chain to next BD(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT - 1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) bd[i].bd_nextp = rdesc[i + 1].bd_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) bd[PESQI_BD_COUNT - 1].bd_nextp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) static void ring_desc_ring_free(struct pic32_sqi *sqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) dma_free_coherent(&sqi->master->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) sizeof(struct buf_desc) * PESQI_BD_COUNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) sqi->bd, sqi->bd_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) kfree(sqi->ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) static void pic32_sqi_hw_init(struct pic32_sqi *sqi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) /* Soft-reset of PESQI controller triggers interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) * We are not yet ready to handle them so disable CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) * interrupt for the time being.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) /* assert soft-reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) writel(PESQI_SOFT_RESET, sqi->regs + PESQI_CONF_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) /* wait until clear */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) readl_poll_timeout_atomic(sqi->regs + PESQI_CONF_REG, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) !(val & PESQI_SOFT_RESET), 1, 5000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) /* disable all interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) pic32_sqi_disable_int(sqi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) /* Now it is safe to enable back CPU interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) /* tx and rx fifo interrupt threshold */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) val = readl(sqi->regs + PESQI_CMD_THRES_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) writel(val, sqi->regs + PESQI_CMD_THRES_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) val = readl(sqi->regs + PESQI_INT_THRES_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) writel(val, sqi->regs + PESQI_INT_THRES_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) /* default configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) val = readl(sqi->regs + PESQI_CONF_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) /* set mode: DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) val &= ~PESQI_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) val |= PESQI_MODE_DMA << PESQI_MODE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) writel(val, sqi->regs + PESQI_CONF_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) /* DATAEN - SQIID0-ID3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) val |= PESQI_QUAD_LANE << PESQI_LANES_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) /* burst/INCR4 enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) val |= PESQI_BURST_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) /* CSEN - all CS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) val |= 3U << PESQI_CSEN_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) writel(val, sqi->regs + PESQI_CONF_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) /* write poll count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) writel(0, sqi->regs + PESQI_BD_POLL_CTRL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) sqi->cur_speed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) sqi->cur_mode = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) static int pic32_sqi_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) struct spi_master *master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) struct pic32_sqi *sqi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) master = spi_alloc_master(&pdev->dev, sizeof(*sqi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) if (!master)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) sqi = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) sqi->master = master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) sqi->regs = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) if (IS_ERR(sqi->regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) ret = PTR_ERR(sqi->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) goto err_free_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) /* irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) sqi->irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) if (sqi->irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) ret = sqi->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) goto err_free_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) /* clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) sqi->sys_clk = devm_clk_get(&pdev->dev, "reg_ck");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) if (IS_ERR(sqi->sys_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) ret = PTR_ERR(sqi->sys_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) dev_err(&pdev->dev, "no sys_clk ?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) goto err_free_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) sqi->base_clk = devm_clk_get(&pdev->dev, "spi_ck");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) if (IS_ERR(sqi->base_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) ret = PTR_ERR(sqi->base_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) dev_err(&pdev->dev, "no base clk ?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) goto err_free_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) ret = clk_prepare_enable(sqi->sys_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) dev_err(&pdev->dev, "sys clk enable failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) goto err_free_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) ret = clk_prepare_enable(sqi->base_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) dev_err(&pdev->dev, "base clk enable failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) clk_disable_unprepare(sqi->sys_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) goto err_free_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) init_completion(&sqi->xfer_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) /* initialize hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) pic32_sqi_hw_init(sqi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) /* allocate buffers & descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) ret = ring_desc_ring_alloc(sqi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) dev_err(&pdev->dev, "ring alloc failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) goto err_disable_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) /* install irq handlers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) ret = request_irq(sqi->irq, pic32_sqi_isr, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) dev_name(&pdev->dev), sqi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) dev_err(&pdev->dev, "request_irq(%d), failed\n", sqi->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) goto err_free_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) /* register master */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) master->num_chipselect = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) master->max_speed_hz = clk_get_rate(sqi->base_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) master->dma_alignment = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) master->max_dma_len = PESQI_BD_BUF_LEN_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) master->dev.of_node = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) master->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_TX_DUAL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) master->flags = SPI_MASTER_HALF_DUPLEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) master->can_dma = pic32_sqi_can_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) master->transfer_one_message = pic32_sqi_one_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) master->prepare_transfer_hardware = pic32_sqi_prepare_hardware;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) master->unprepare_transfer_hardware = pic32_sqi_unprepare_hardware;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) ret = devm_spi_register_master(&pdev->dev, master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) dev_err(&master->dev, "failed registering spi master\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) free_irq(sqi->irq, sqi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) goto err_free_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) platform_set_drvdata(pdev, sqi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) err_free_ring:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) ring_desc_ring_free(sqi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) err_disable_clk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) clk_disable_unprepare(sqi->base_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) clk_disable_unprepare(sqi->sys_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) err_free_master:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) spi_master_put(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) static int pic32_sqi_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) struct pic32_sqi *sqi = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) /* release resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) free_irq(sqi->irq, sqi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) ring_desc_ring_free(sqi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) /* disable clk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) clk_disable_unprepare(sqi->base_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) clk_disable_unprepare(sqi->sys_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) static const struct of_device_id pic32_sqi_of_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) {.compatible = "microchip,pic32mzda-sqi",},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) MODULE_DEVICE_TABLE(of, pic32_sqi_of_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) static struct platform_driver pic32_sqi_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) .name = "sqi-pic32",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) .of_match_table = of_match_ptr(pic32_sqi_of_ids),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) .probe = pic32_sqi_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) .remove = pic32_sqi_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) module_platform_driver(pic32_sqi_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) MODULE_AUTHOR("Purna Chandra Mandal <purna.mandal@microchip.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) MODULE_DESCRIPTION("Microchip SPI driver for PIC32 SQI controller.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) MODULE_LICENSE("GPL v2");