Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * au1550 psc spi controller driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * may work also with au1200, au1210, au1250
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * will not work on au1000, au1100 and au1500 (no full spi controller there)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * Copyright (c) 2006 ATRON electronic GmbH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * Author: Jan Nikitenko <jan.nikitenko@gmail.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/resource.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/spi/spi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/spi/spi_bitbang.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <linux/completion.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <asm/mach-au1x00/au1000.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <asm/mach-au1x00/au1xxx_psc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <asm/mach-au1x00/au1xxx_dbdma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #include <asm/mach-au1x00/au1550_spi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 
/*
 * Module parameter: nonzero selects the DMA transfer paths where supported
 * (see the hw->usedma checks in chipsel/setupxfer); 0 forces PIO-only mode.
 * Writable at runtime via sysfs (mode 0644).
 */
static unsigned usedma = 1;
module_param(usedma, uint, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) #define AU1550_SPI_DEBUG_LOOPBACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 
/* number of dbdma descriptors allocated per channel */
#define AU1550_SPI_DBDMA_DESCRIPTORS 1
/* minimum size of the preallocated rx bounce buffer used for rx-less dma */
#define AU1550_SPI_DMA_RXTMP_MINSIZE 2048U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 
/* per-controller driver state */
struct au1550_spi {
	struct spi_bitbang bitbang;

	volatile psc_spi_t __iomem *regs;	/* mapped PSC SPI register block */
	int irq;

	unsigned len;		/* byte length of the current transfer */
	unsigned tx_count;	/* bytes actually pushed out (set on completion/error) */
	unsigned rx_count;	/* bytes actually received (set on completion/error) */
	const u8 *tx;		/* tx data buffer (rx buffer reused when tx is NULL) */
	u8 *rx;			/* rx data buffer (tmp bounce buffer when user gave none) */

	/* per-word fifo handlers; selected by au1550_spi_bits_handlers_set() */
	void (*rx_word)(struct au1550_spi *hw);
	void (*tx_word)(struct au1550_spi *hw);
	/* transfer entry point (dma or pio variant) */
	int (*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t);
	irqreturn_t (*irq_callback)(struct au1550_spi *hw);

	/* completed from the irq callback when a transfer finishes or errors */
	struct completion master_done;

	unsigned usedma;	/* nonzero when dma channels are set up and usable */
	u32 dma_tx_id;
	u32 dma_rx_id;
	u32 dma_tx_ch;
	u32 dma_rx_ch;

	u8 *dma_rx_tmpbuf;		/* rx bounce buffer for transfers without rx_buf */
	unsigned dma_rx_tmpbuf_size;	/* its allocated size */
	u32 dma_rx_tmpbuf_addr;		/* its dma mapping (DMA_FROM_DEVICE) */

	struct spi_master *master;
	struct device *dev;
	struct au1550_spi_info *pdata;	/* board data: clock rate, cs hooks */
	struct resource *ioarea;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) /* we use an 8-bit memory device for dma transfers to/from spi fifo */
/*
 * we use an 8-bit memory device for dma transfers to/from spi fifo
 * (dev_devwidth = 8 matches the fifo side; dev_physaddr is filled per
 * transfer by the dbdma layer since this is a memory endpoint)
 */
static dbdev_tab_t au1550_spi_mem_dbdev =
{
	.dev_id			= DBDMA_MEM_CHAN,
	.dev_flags		= DEV_FLAGS_ANYUSE|DEV_FLAGS_SYNC,
	.dev_tsize		= 0,
	.dev_devwidth		= 8,
	.dev_physaddr		= 0x00000000,
	.dev_intlevel		= 0,
	.dev_intpolarity	= 0
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 
/* dbdma device id registered for au1550_spi_mem_dbdev above */
static int ddma_memid;	/* id to above mem dma device */

/* forward declaration: selects pio/dma handlers based on word size */
static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94)  *  compute BRG and DIV bits to setup spi clock based on main input clock rate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95)  *  that was specified in platform data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96)  *  according to au1550 datasheet:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97)  *    psc_tempclk = psc_mainclk / (2 << DIV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98)  *    spiclk = psc_tempclk / (2 * (BRG + 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99)  *    BRG valid range is 4..63
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)  *    DIV valid range is 0..3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) static u32 au1550_spi_baudcfg(struct au1550_spi *hw, unsigned speed_hz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	u32 mainclk_hz = hw->pdata->mainclk_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 	u32 div, brg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 	for (div = 0; div < 4; div++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 		brg = mainclk_hz / speed_hz / (4 << div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 		/* now we have BRG+1 in brg, so count with that */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 		if (brg < (4 + 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 			brg = (4 + 1);	/* speed_hz too big */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 			break;		/* set lowest brg (div is == 0) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 		if (brg <= (63 + 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 			break;		/* we have valid brg and div */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 	if (div == 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 		div = 3;		/* speed_hz too small */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 		brg = (63 + 1);		/* set highest brg and div */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	brg--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 	return PSC_SPICFG_SET_BAUD(brg) | PSC_SPICFG_SET_DIV(div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) static inline void au1550_spi_mask_ack_all(struct au1550_spi *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 	hw->regs->psc_spimsk =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 		  PSC_SPIMSK_MM | PSC_SPIMSK_RR | PSC_SPIMSK_RO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 		| PSC_SPIMSK_RU | PSC_SPIMSK_TR | PSC_SPIMSK_TO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 		| PSC_SPIMSK_TU | PSC_SPIMSK_SD | PSC_SPIMSK_MD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 	wmb(); /* drain writebuffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	hw->regs->psc_spievent =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 		  PSC_SPIEVNT_MM | PSC_SPIEVNT_RR | PSC_SPIEVNT_RO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 		| PSC_SPIEVNT_RU | PSC_SPIEVNT_TR | PSC_SPIEVNT_TO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 		| PSC_SPIEVNT_TU | PSC_SPIEVNT_SD | PSC_SPIEVNT_MD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	wmb(); /* drain writebuffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) static void au1550_spi_reset_fifos(struct au1550_spi *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 	u32 pcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	hw->regs->psc_spipcr = PSC_SPIPCR_RC | PSC_SPIPCR_TC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	wmb(); /* drain writebuffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 		pcr = hw->regs->psc_spipcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 		wmb(); /* drain writebuffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	} while (pcr != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153)  * dma transfers are used for the most common spi word size of 8-bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)  * we cannot easily change already set up dma channels' width, so if we wanted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)  * dma support for more than 8-bit words (up to 24 bits), we would need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)  * setup dma channels from scratch on each spi transfer, based on bits_per_word
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)  * instead we have pre set up 8 bit dma channels supporting spi 4 to 8 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158)  * transfers, and 9 to 24 bits spi transfers will be done in pio irq based mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)  * callbacks to handle dma or pio are set up in au1550_spi_bits_handlers_set()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) static void au1550_spi_chipsel(struct spi_device *spi, int value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 	struct au1550_spi *hw = spi_master_get_devdata(spi->master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	unsigned cspol = spi->mode & SPI_CS_HIGH ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	u32 cfg, stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 	switch (value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 	case BITBANG_CS_INACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 		if (hw->pdata->deactivate_cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 			hw->pdata->deactivate_cs(hw->pdata, spi->chip_select,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 					cspol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	case BITBANG_CS_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 		au1550_spi_bits_handlers_set(hw, spi->bits_per_word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 		cfg = hw->regs->psc_spicfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 		wmb(); /* drain writebuffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 		hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 		wmb(); /* drain writebuffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 		if (spi->mode & SPI_CPOL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 			cfg |= PSC_SPICFG_BI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 			cfg &= ~PSC_SPICFG_BI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 		if (spi->mode & SPI_CPHA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 			cfg &= ~PSC_SPICFG_CDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 			cfg |= PSC_SPICFG_CDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 		if (spi->mode & SPI_LSB_FIRST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 			cfg |= PSC_SPICFG_MLF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 			cfg &= ~PSC_SPICFG_MLF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 		if (hw->usedma && spi->bits_per_word <= 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 			cfg &= ~PSC_SPICFG_DD_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 			cfg |= PSC_SPICFG_DD_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 		cfg = PSC_SPICFG_CLR_LEN(cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 		cfg |= PSC_SPICFG_SET_LEN(spi->bits_per_word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 		cfg = PSC_SPICFG_CLR_BAUD(cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 		cfg &= ~PSC_SPICFG_SET_DIV(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 		cfg |= au1550_spi_baudcfg(hw, spi->max_speed_hz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 		hw->regs->psc_spicfg = cfg | PSC_SPICFG_DE_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 		wmb(); /* drain writebuffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 			stat = hw->regs->psc_spistat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 			wmb(); /* drain writebuffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 		} while ((stat & PSC_SPISTAT_DR) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 		if (hw->pdata->activate_cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 			hw->pdata->activate_cs(hw->pdata, spi->chip_select,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 					cspol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 
/*
 * spi_bitbang setup_transfer callback: reprogram word size, dma usage and
 * clock for one transfer (or from the device defaults when t is NULL),
 * then reset fifos and mask/ack all events so the transfer starts clean.
 *
 * Returns 0 on success, -EINVAL if no clock rate is specified.
 */
static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct au1550_spi *hw = spi_master_get_devdata(spi->master);
	unsigned bpw, hz;
	u32 cfg, stat;

	/* per-transfer overrides win over the device defaults */
	if (t) {
		bpw = t->bits_per_word;
		hz = t->speed_hz;
	} else {
		bpw = spi->bits_per_word;
		hz = spi->max_speed_hz;
	}

	if (!hz)
		return -EINVAL;

	/*
	 * NOTE(review): handlers are chosen from spi->bits_per_word, not the
	 * per-transfer bpw used below — presumably the spi core keeps these
	 * in sync; confirm before relying on per-transfer bpw overrides.
	 */
	au1550_spi_bits_handlers_set(hw, spi->bits_per_word);

	/* the controller must be disabled while reconfiguring */
	cfg = hw->regs->psc_spicfg;
	wmb(); /* drain writebuffer */
	hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE;
	wmb(); /* drain writebuffer */

	/* dma only supports up to 8-bit words; otherwise disable it */
	if (hw->usedma && bpw <= 8)
		cfg &= ~PSC_SPICFG_DD_DISABLE;
	else
		cfg |= PSC_SPICFG_DD_DISABLE;
	cfg = PSC_SPICFG_CLR_LEN(cfg);
	cfg |= PSC_SPICFG_SET_LEN(bpw);

	/* clear old baud/div bits before or-ing in the new ones */
	cfg = PSC_SPICFG_CLR_BAUD(cfg);
	cfg &= ~PSC_SPICFG_SET_DIV(3);
	cfg |= au1550_spi_baudcfg(hw, hz);

	hw->regs->psc_spicfg = cfg;
	wmb(); /* drain writebuffer */

	/* if the enable bit was still set, wait for device-ready */
	if (cfg & PSC_SPICFG_DE_ENABLE) {
		do {
			stat = hw->regs->psc_spistat;
			wmb(); /* drain writebuffer */
		} while ((stat & PSC_SPISTAT_DR) == 0);
	}

	au1550_spi_reset_fifos(hw);
	au1550_spi_mask_ack_all(hw);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)  * for dma spi transfers, we have to setup rx channel, otherwise there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)  * no reliable way how to recognize that spi transfer is done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)  * dma complete callbacks are called before real spi transfer is finished
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)  * and if only tx dma channel is set up (and rx fifo overflow event masked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)  * spi master done event irq is not generated unless rx fifo is empty (emptied)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)  * so we need rx tmp buffer to use for rx dma if user does not provide one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) static int au1550_spi_dma_rxtmp_alloc(struct au1550_spi *hw, unsigned size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 	hw->dma_rx_tmpbuf = kmalloc(size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 	if (!hw->dma_rx_tmpbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 	hw->dma_rx_tmpbuf_size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 	hw->dma_rx_tmpbuf_addr = dma_map_single(hw->dev, hw->dma_rx_tmpbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 			size, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 	if (dma_mapping_error(hw->dev, hw->dma_rx_tmpbuf_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 		kfree(hw->dma_rx_tmpbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 		hw->dma_rx_tmpbuf = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 		hw->dma_rx_tmpbuf_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) static void au1550_spi_dma_rxtmp_free(struct au1550_spi *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 	dma_unmap_single(hw->dev, hw->dma_rx_tmpbuf_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 			hw->dma_rx_tmpbuf_size, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 	kfree(hw->dma_rx_tmpbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 	hw->dma_rx_tmpbuf = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 	hw->dma_rx_tmpbuf_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 
/*
 * DMA transfer path: map the buffers, queue them on the rx/tx dbdma
 * rings, kick the PSC master transfer and sleep until the irq callback
 * signals master_done. Returns the number of bytes actually transferred
 * (min of rx and tx progress, as recorded by the irq callback).
 */
static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
{
	struct au1550_spi *hw = spi_master_get_devdata(spi->master);
	dma_addr_t dma_tx_addr;
	dma_addr_t dma_rx_addr;
	u32 res;

	hw->len = t->len;
	hw->tx_count = 0;
	hw->rx_count = 0;

	hw->tx = t->tx_buf;
	hw->rx = t->rx_buf;
	dma_tx_addr = t->tx_dma;
	dma_rx_addr = t->rx_dma;

	/*
	 * check if buffers are already dma mapped, map them otherwise:
	 * - first map the TX buffer, so cache data gets written to memory
	 * - then map the RX buffer, so that cache entries (with
	 *   soon-to-be-stale data) get removed
	 * use rx buffer in place of tx if tx buffer was not provided
	 * use temp rx buffer (preallocated or realloc to fit) for rx dma
	 *
	 * NOTE(review): a dma address of 0 is treated as "not mapped"
	 * (DMA_ADDR_INVALID) throughout this function — confirm 0 can
	 * never be a valid mapping on this platform.
	 */
	if (t->tx_buf) {
		if (t->tx_dma == 0) {	/* if DMA_ADDR_INVALID, map it */
			dma_tx_addr = dma_map_single(hw->dev,
					(void *)t->tx_buf,
					t->len, DMA_TO_DEVICE);
			if (dma_mapping_error(hw->dev, dma_tx_addr))
				dev_err(hw->dev, "tx dma map error\n");
		}
	}

	if (t->rx_buf) {
		if (t->rx_dma == 0) {	/* if DMA_ADDR_INVALID, map it */
			dma_rx_addr = dma_map_single(hw->dev,
					(void *)t->rx_buf,
					t->len, DMA_FROM_DEVICE);
			if (dma_mapping_error(hw->dev, dma_rx_addr))
				dev_err(hw->dev, "rx dma map error\n");
		}
	} else {
		/* no rx buffer: fall back to (and grow if needed) the
		 * premapped bounce buffer */
		if (t->len > hw->dma_rx_tmpbuf_size) {
			int ret;

			au1550_spi_dma_rxtmp_free(hw);
			ret = au1550_spi_dma_rxtmp_alloc(hw, max(t->len,
					AU1550_SPI_DMA_RXTMP_MINSIZE));
			if (ret < 0)
				return ret;
		}
		hw->rx = hw->dma_rx_tmpbuf;
		dma_rx_addr = hw->dma_rx_tmpbuf_addr;
		dma_sync_single_for_device(hw->dev, dma_rx_addr,
			t->len, DMA_FROM_DEVICE);
	}

	/* no tx buffer: transmit from the rx buffer (clocks out its
	 * current contents) */
	if (!t->tx_buf) {
		dma_sync_single_for_device(hw->dev, dma_rx_addr,
				t->len, DMA_BIDIRECTIONAL);
		hw->tx = hw->rx;
	}

	/* put buffers on the ring */
	res = au1xxx_dbdma_put_dest(hw->dma_rx_ch, virt_to_phys(hw->rx),
				    t->len, DDMA_FLAGS_IE);
	if (!res)
		dev_err(hw->dev, "rx dma put dest error\n");

	res = au1xxx_dbdma_put_source(hw->dma_tx_ch, virt_to_phys(hw->tx),
				      t->len, DDMA_FLAGS_IE);
	if (!res)
		dev_err(hw->dev, "tx dma put source error\n");

	/* rx must be started before tx so completion can be detected */
	au1xxx_dbdma_start(hw->dma_rx_ch);
	au1xxx_dbdma_start(hw->dma_tx_ch);

	/* by default enable nearly all events interrupt */
	hw->regs->psc_spimsk = PSC_SPIMSK_SD;
	wmb(); /* drain writebuffer */

	/* start the transfer */
	hw->regs->psc_spipcr = PSC_SPIPCR_MS;
	wmb(); /* drain writebuffer */

	/* irq callback completes this on master-done or error */
	wait_for_completion(&hw->master_done);

	au1xxx_dbdma_stop(hw->dma_tx_ch);
	au1xxx_dbdma_stop(hw->dma_rx_ch);

	if (!t->rx_buf) {
		/* using the temporal preallocated and premapped buffer */
		dma_sync_single_for_cpu(hw->dev, dma_rx_addr, t->len,
			DMA_FROM_DEVICE);
	}
	/* unmap buffers if mapped above */
	if (t->rx_buf && t->rx_dma == 0 )
		dma_unmap_single(hw->dev, dma_rx_addr, t->len,
			DMA_FROM_DEVICE);
	if (t->tx_buf && t->tx_dma == 0 )
		dma_unmap_single(hw->dev, dma_tx_addr, t->len,
			DMA_TO_DEVICE);

	return hw->rx_count < hw->tx_count ? hw->rx_count : hw->tx_count;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 
/*
 * IRQ handler used while a DMA (dbdma) driven transfer is in flight.
 *
 * On any SPI error event the transfer is treated as finished: interrupts
 * are masked, both dbdma channels are stopped, the number of bytes moved
 * so far is captured from the dma residue, and the waiter on master_done
 * is released.  On the master-done event (PSC_SPIEVNT_MD) the transfer
 * completed normally and the full length is reported.
 */
static irqreturn_t au1550_spi_dma_irq_callback(struct au1550_spi *hw)
{
	u32 stat, evnt;

	stat = hw->regs->psc_spistat;
	evnt = hw->regs->psc_spievent;
	wmb(); /* drain writebuffer */
	/* no Device Interrupt bit -> this PSC did not raise the IRQ */
	if ((stat & PSC_SPISTAT_DI) == 0) {
		dev_err(hw->dev, "Unexpected IRQ!\n");
		return IRQ_NONE;
	}

	if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO
				| PSC_SPIEVNT_RU | PSC_SPIEVNT_TO
				| PSC_SPIEVNT_TU | PSC_SPIEVNT_SD))
			!= 0) {
		/*
		 * due to an spi error we consider transfer as done,
		 * so mask all events until before next transfer start
		 * and stop the possibly running dma immediately
		 */
		au1550_spi_mask_ack_all(hw);
		au1xxx_dbdma_stop(hw->dma_rx_ch);
		au1xxx_dbdma_stop(hw->dma_tx_ch);

		/* get number of transferred bytes */
		/* residue must be read before the channels are reset below */
		hw->rx_count = hw->len - au1xxx_get_dma_residue(hw->dma_rx_ch);
		hw->tx_count = hw->len - au1xxx_get_dma_residue(hw->dma_tx_ch);

		au1xxx_dbdma_reset(hw->dma_rx_ch);
		au1xxx_dbdma_reset(hw->dma_tx_ch);
		au1550_spi_reset_fifos(hw);

		/*
		 * NOTE(review): exact-equality check - the dedicated overflow
		 * message is printed only when RO is the sole pending event
		 * bit; any combination falls through to the generic message.
		 */
		if (evnt == PSC_SPIEVNT_RO)
			dev_err(hw->dev,
				"dma transfer: receive FIFO overflow!\n");
		else
			dev_err(hw->dev,
				"dma transfer: unexpected SPI error "
				"(event=0x%x stat=0x%x)!\n", evnt, stat);

		complete(&hw->master_done);
		return IRQ_HANDLED;
	}

	if ((evnt & PSC_SPIEVNT_MD) != 0) {
		/* transfer completed successfully */
		au1550_spi_mask_ack_all(hw);
		hw->rx_count = hw->len;
		hw->tx_count = hw->len;
		complete(&hw->master_done);
	}
	return IRQ_HANDLED;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 
/* routines to handle different word sizes in pio mode */

/*
 * AU1550_SPI_RX_WORD(size, mask) generates au1550_spi_rx_word_<size>():
 * read one word from the PSC tx/rx fifo register, mask it down to the
 * valid bits, and store it in the rx buffer if one was supplied.
 * rx_count is advanced even without an rx buffer so the byte accounting
 * stays correct for tx-only transfers.
 */
#define AU1550_SPI_RX_WORD(size, mask)					\
static void au1550_spi_rx_word_##size(struct au1550_spi *hw)		\
{									\
	u32 fifoword = hw->regs->psc_spitxrx & (u32)(mask);		\
	wmb(); /* drain writebuffer */					\
	if (hw->rx) {							\
		*(u##size *)hw->rx = (u##size)fifoword;			\
		hw->rx += (size) / 8;					\
	}								\
	hw->rx_count += (size) / 8;					\
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 
/*
 * AU1550_SPI_TX_WORD(size, mask) generates au1550_spi_tx_word_<size>():
 * take one word from the tx buffer (or zero for rx-only transfers) and
 * push it into the PSC tx/rx fifo register.  The word that completes the
 * transfer is tagged with PSC_SPITXRX_LC so the controller knows it is
 * the last one.
 */
#define AU1550_SPI_TX_WORD(size, mask)					\
static void au1550_spi_tx_word_##size(struct au1550_spi *hw)		\
{									\
	u32 fifoword = 0;						\
	if (hw->tx) {							\
		fifoword = *(u##size *)hw->tx & (u32)(mask);		\
		hw->tx += (size) / 8;					\
	}								\
	hw->tx_count += (size) / 8;					\
	if (hw->tx_count >= hw->len)					\
		fifoword |= PSC_SPITXRX_LC;				\
	hw->regs->psc_spitxrx = fifoword;				\
	wmb(); /* drain writebuffer */					\
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) AU1550_SPI_RX_WORD(8,0xff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) AU1550_SPI_RX_WORD(16,0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) AU1550_SPI_RX_WORD(32,0xffffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) AU1550_SPI_TX_WORD(8,0xff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) AU1550_SPI_TX_WORD(16,0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) AU1550_SPI_TX_WORD(32,0xffffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 
/*
 * Run one spi_transfer in pio (interrupt driven) mode.
 *
 * Primes the tx fifo from the transfer buffer, unmasks the interesting
 * events, starts the transfer and sleeps on master_done until the IRQ
 * callback signals completion or error.  Returns the number of bytes
 * actually moved (the smaller of the rx and tx counts).
 */
static int au1550_spi_pio_txrxb(struct spi_device *spi, struct spi_transfer *t)
{
	u32 stat, mask;
	struct au1550_spi *hw = spi_master_get_devdata(spi->master);

	hw->tx = t->tx_buf;
	hw->rx = t->rx_buf;
	hw->len = t->len;
	hw->tx_count = 0;
	hw->rx_count = 0;

	/* by default enable nearly all events after filling tx fifo */
	mask = PSC_SPIMSK_SD;

	/* fill the transmit FIFO */
	while (hw->tx_count < hw->len) {

		hw->tx_word(hw);

		if (hw->tx_count >= hw->len) {
			/* mask tx fifo request interrupt as we are done */
			mask |= PSC_SPIMSK_TR;
		}

		stat = hw->regs->psc_spistat;
		wmb(); /* drain writebuffer */
		/* stop priming once the tx fifo is full */
		if (stat & PSC_SPISTAT_TF)
			break;
	}

	/* enable event interrupts */
	hw->regs->psc_spimsk = mask;
	wmb(); /* drain writebuffer */

	/* start the transfer */
	hw->regs->psc_spipcr = PSC_SPIPCR_MS;
	wmb(); /* drain writebuffer */

	/* the IRQ callback completes this on success or error */
	wait_for_completion(&hw->master_done);

	return hw->rx_count < hw->tx_count ? hw->rx_count : hw->tx_count;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 
/*
 * IRQ handler used while a pio transfer is in flight: drains the rx fifo,
 * refills the tx fifo one word per word read (to avoid rx overflow),
 * restarts the transfer after a tx underflow, and completes master_done
 * once all expected bytes have been received or an error occurred.
 */
static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw)
{
	int busy;
	u32 stat, evnt;

	stat = hw->regs->psc_spistat;
	evnt = hw->regs->psc_spievent;
	wmb(); /* drain writebuffer */
	/* no Device Interrupt bit -> this PSC did not raise the IRQ */
	if ((stat & PSC_SPISTAT_DI) == 0) {
		dev_err(hw->dev, "Unexpected IRQ!\n");
		return IRQ_NONE;
	}

	/* note: TU is intentionally absent here - it is recovered below */
	if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO
				| PSC_SPIEVNT_RU | PSC_SPIEVNT_TO
				| PSC_SPIEVNT_SD))
			!= 0) {
		/*
		 * due to an error we consider transfer as done,
		 * so mask all events until before next transfer start
		 */
		au1550_spi_mask_ack_all(hw);
		au1550_spi_reset_fifos(hw);
		dev_err(hw->dev,
			"pio transfer: unexpected SPI error "
			"(event=0x%x stat=0x%x)!\n", evnt, stat);
		complete(&hw->master_done);
		return IRQ_HANDLED;
	}

	/*
	 * while there is something to read from rx fifo
	 * or there is a space to write to tx fifo:
	 */
	do {
		busy = 0;
		stat = hw->regs->psc_spistat;
		wmb(); /* drain writebuffer */

		/*
		 * Take care to not let the Rx FIFO overflow.
		 *
		 * We only write a byte if we have read one at least. Initially,
		 * the write fifo is full, so we should read from the read fifo
		 * first.
		 * In case we miss a word from the read fifo, we should get a
		 * RO event and should back out.
		 */
		if (!(stat & PSC_SPISTAT_RE) && hw->rx_count < hw->len) {
			hw->rx_word(hw);
			busy = 1;

			if (!(stat & PSC_SPISTAT_TF) && hw->tx_count < hw->len)
				hw->tx_word(hw);
		}
	} while (busy);

	/* ack the rx/tx fifo request events serviced above */
	hw->regs->psc_spievent = PSC_SPIEVNT_RR | PSC_SPIEVNT_TR;
	wmb(); /* drain writebuffer */

	/*
	 * Restart the SPI transmission in case of a transmit underflow.
	 * This seems to work despite the notes in the Au1550 data book
	 * of Figure 8-4 with flowchart for SPI master operation:
	 *
	 * """Note 1: An XFR Error Interrupt occurs, unless masked,
	 * for any of the following events: Tx FIFO Underflow,
	 * Rx FIFO Overflow, or Multiple-master Error
	 *    Note 2: In case of a Tx Underflow Error, all zeroes are
	 * transmitted."""
	 *
	 * By simply restarting the spi transfer on Tx Underflow Error,
	 * we assume that spi transfer was paused instead of zeroes
	 * transmission mentioned in the Note 2 of Au1550 data book.
	 */
	if (evnt & PSC_SPIEVNT_TU) {
		hw->regs->psc_spievent = PSC_SPIEVNT_TU | PSC_SPIEVNT_MD;
		wmb(); /* drain writebuffer */
		hw->regs->psc_spipcr = PSC_SPIPCR_MS;
		wmb(); /* drain writebuffer */
	}

	if (hw->rx_count >= hw->len) {
		/* transfer completed successfully */
		au1550_spi_mask_ack_all(hw);
		complete(&hw->master_done);
	}
	return IRQ_HANDLED;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) static int au1550_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 	struct au1550_spi *hw = spi_master_get_devdata(spi->master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 	return hw->txrx_bufs(spi, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) static irqreturn_t au1550_spi_irq(int irq, void *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 	struct au1550_spi *hw = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 	return hw->irq_callback(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 	if (bpw <= 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 		if (hw->usedma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 			hw->txrx_bufs = &au1550_spi_dma_txrxb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 			hw->irq_callback = &au1550_spi_dma_irq_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 			hw->rx_word = &au1550_spi_rx_word_8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 			hw->tx_word = &au1550_spi_tx_word_8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 			hw->txrx_bufs = &au1550_spi_pio_txrxb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 			hw->irq_callback = &au1550_spi_pio_irq_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 	} else if (bpw <= 16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 		hw->rx_word = &au1550_spi_rx_word_16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 		hw->tx_word = &au1550_spi_tx_word_16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 		hw->txrx_bufs = &au1550_spi_pio_txrxb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 		hw->irq_callback = &au1550_spi_pio_irq_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 		hw->rx_word = &au1550_spi_rx_word_32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 		hw->tx_word = &au1550_spi_tx_word_32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 		hw->txrx_bufs = &au1550_spi_pio_txrxb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) 		hw->irq_callback = &au1550_spi_pio_irq_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 
/*
 * Bring the PSC block up in SPI mode: disable it, select SPI protocol,
 * re-enable, program an initial 8-bit / slowest-clock configuration,
 * then enable the SPI device and wait for it to report ready.
 * The busy-wait loops poll status bits the hardware sets on its own;
 * the statement order here follows the required bring-up sequence.
 */
static void au1550_spi_setup_psc_as_spi(struct au1550_spi *hw)
{
	u32 stat, cfg;

	/* set up the PSC for SPI mode */
	hw->regs->psc_ctrl = PSC_CTRL_DISABLE;
	wmb(); /* drain writebuffer */
	hw->regs->psc_sel = PSC_SEL_PS_SPIMODE;
	wmb(); /* drain writebuffer */

	hw->regs->psc_spicfg = 0;
	wmb(); /* drain writebuffer */

	hw->regs->psc_ctrl = PSC_CTRL_ENABLE;
	wmb(); /* drain writebuffer */

	/* busy-wait until the PSC reports SPISTAT_SR set */
	do {
		stat = hw->regs->psc_spistat;
		wmb(); /* drain writebuffer */
	} while ((stat & PSC_SPISTAT_SR) == 0);


	/* pio mode needs DD_DISABLE; dma mode leaves it clear */
	cfg = hw->usedma ? 0 : PSC_SPICFG_DD_DISABLE;
	cfg |= PSC_SPICFG_SET_LEN(8);
	cfg |= PSC_SPICFG_RT_FIFO8 | PSC_SPICFG_TT_FIFO8;
	/* use minimal allowed brg and div values as initial setting: */
	cfg |= PSC_SPICFG_SET_BAUD(4) | PSC_SPICFG_SET_DIV(0);

#ifdef AU1550_SPI_DEBUG_LOOPBACK
	cfg |= PSC_SPICFG_LB;
#endif

	hw->regs->psc_spicfg = cfg;
	wmb(); /* drain writebuffer */

	/* mask and ack all events before enabling the device */
	au1550_spi_mask_ack_all(hw);

	hw->regs->psc_spicfg |= PSC_SPICFG_DE_ENABLE;
	wmb(); /* drain writebuffer */

	/* busy-wait until the PSC reports SPISTAT_DR set */
	do {
		stat = hw->regs->psc_spistat;
		wmb(); /* drain writebuffer */
	} while ((stat & PSC_SPISTAT_DR) == 0);

	au1550_spi_reset_fifos(hw);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) static int au1550_spi_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 	struct au1550_spi *hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 	struct spi_master *master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) 	struct resource *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) 	master = spi_alloc_master(&pdev->dev, sizeof(struct au1550_spi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 	if (master == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) 		dev_err(&pdev->dev, "No memory for spi_master\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 		goto err_nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 	/* the spi->mode bits understood by this driver: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 	hw = spi_master_get_devdata(master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 	hw->master = master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 	hw->pdata = dev_get_platdata(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 	hw->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 	if (hw->pdata == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 		dev_err(&pdev->dev, "No platform data supplied\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 		err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 		goto err_no_pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) 	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 	if (!r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 		dev_err(&pdev->dev, "no IRQ\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 		goto err_no_iores;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) 	hw->irq = r->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 	hw->usedma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 	if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 		hw->dma_tx_id = r->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 		r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 		if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 			hw->dma_rx_id = r->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 			if (usedma && ddma_memid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 				if (pdev->dev.dma_mask == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 					dev_warn(&pdev->dev, "no dma mask\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 					hw->usedma = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 	if (!r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 		dev_err(&pdev->dev, "no mmio resource\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 		goto err_no_iores;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 	hw->ioarea = request_mem_region(r->start, sizeof(psc_spi_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 					pdev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 	if (!hw->ioarea) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 		dev_err(&pdev->dev, "Cannot reserve iomem region\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 		err = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 		goto err_no_iores;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 	hw->regs = (psc_spi_t __iomem *)ioremap(r->start, sizeof(psc_spi_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 	if (!hw->regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 		dev_err(&pdev->dev, "cannot ioremap\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 		err = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 		goto err_ioremap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 	platform_set_drvdata(pdev, hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 	init_completion(&hw->master_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 	hw->bitbang.master = hw->master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 	hw->bitbang.setup_transfer = au1550_spi_setupxfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 	hw->bitbang.chipselect = au1550_spi_chipsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 	hw->bitbang.txrx_bufs = au1550_spi_txrx_bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 	if (hw->usedma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) 		hw->dma_tx_ch = au1xxx_dbdma_chan_alloc(ddma_memid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 			hw->dma_tx_id, NULL, (void *)hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 		if (hw->dma_tx_ch == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 			dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 				"Cannot allocate tx dma channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 			err = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 			goto err_no_txdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 		au1xxx_dbdma_set_devwidth(hw->dma_tx_ch, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) 		if (au1xxx_dbdma_ring_alloc(hw->dma_tx_ch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 			AU1550_SPI_DBDMA_DESCRIPTORS) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 			dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 				"Cannot allocate tx dma descriptors\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) 			err = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) 			goto err_no_txdma_descr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) 		hw->dma_rx_ch = au1xxx_dbdma_chan_alloc(hw->dma_rx_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) 			ddma_memid, NULL, (void *)hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 		if (hw->dma_rx_ch == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 			dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) 				"Cannot allocate rx dma channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 			err = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 			goto err_no_rxdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) 		au1xxx_dbdma_set_devwidth(hw->dma_rx_ch, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 		if (au1xxx_dbdma_ring_alloc(hw->dma_rx_ch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 			AU1550_SPI_DBDMA_DESCRIPTORS) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 			dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 				"Cannot allocate rx dma descriptors\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 			err = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 			goto err_no_rxdma_descr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 		err = au1550_spi_dma_rxtmp_alloc(hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) 			AU1550_SPI_DMA_RXTMP_MINSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 		if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) 			dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 				"Cannot allocate initial rx dma tmp buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) 			goto err_dma_rxtmp_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) 	au1550_spi_bits_handlers_set(hw, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) 	err = request_irq(hw->irq, au1550_spi_irq, 0, pdev->name, hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) 		dev_err(&pdev->dev, "Cannot claim IRQ\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 		goto err_no_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) 	master->bus_num = pdev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) 	master->num_chipselect = hw->pdata->num_chipselect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) 	 *  precompute valid range for spi freq - from au1550 datasheet:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) 	 *    psc_tempclk = psc_mainclk / (2 << DIV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) 	 *    spiclk = psc_tempclk / (2 * (BRG + 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) 	 *    BRG valid range is 4..63
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) 	 *    DIV valid range is 0..3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) 	 *  round the min and max frequencies to values that would still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) 	 *  produce valid brg and div
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) 		int min_div = (2 << 0) * (2 * (4 + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) 		int max_div = (2 << 3) * (2 * (63 + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) 		master->max_speed_hz = hw->pdata->mainclk_hz / min_div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) 		master->min_speed_hz =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) 				hw->pdata->mainclk_hz / (max_div + 1) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) 	au1550_spi_setup_psc_as_spi(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) 	err = spi_bitbang_start(&hw->bitbang);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) 		dev_err(&pdev->dev, "Failed to register SPI master\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) 		goto err_register;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) 	dev_info(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) 		"spi master registered: bus_num=%d num_chipselect=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) 		master->bus_num, master->num_chipselect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) err_register:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) 	free_irq(hw->irq, hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) err_no_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) 	au1550_spi_dma_rxtmp_free(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) err_dma_rxtmp_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) err_no_rxdma_descr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) 	if (hw->usedma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) 		au1xxx_dbdma_chan_free(hw->dma_rx_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) err_no_rxdma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) err_no_txdma_descr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) 	if (hw->usedma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) 		au1xxx_dbdma_chan_free(hw->dma_tx_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) err_no_txdma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) 	iounmap((void __iomem *)hw->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) err_ioremap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) 	release_mem_region(r->start, sizeof(psc_spi_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) err_no_iores:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) err_no_pdata:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) 	spi_master_put(hw->master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) err_nomem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) static int au1550_spi_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) 	struct au1550_spi *hw = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) 	dev_info(&pdev->dev, "spi master remove: bus_num=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) 		hw->master->bus_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) 	spi_bitbang_stop(&hw->bitbang);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) 	free_irq(hw->irq, hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) 	iounmap((void __iomem *)hw->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) 	release_mem_region(hw->ioarea->start, sizeof(psc_spi_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) 	if (hw->usedma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) 		au1550_spi_dma_rxtmp_free(hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) 		au1xxx_dbdma_chan_free(hw->dma_rx_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) 		au1xxx_dbdma_chan_free(hw->dma_tx_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) 	spi_master_put(hw->master);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) /* work with hotplug and coldplug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) MODULE_ALIAS("platform:au1550-spi");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) static struct platform_driver au1550_spi_drv = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) 	.probe = au1550_spi_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) 	.remove = au1550_spi_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) 		.name = "au1550-spi",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) static int __init au1550_spi_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) 	 * create memory device with 8 bits dev_devwidth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) 	 * needed for proper byte ordering to spi fifo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) 	switch (alchemy_get_cputype()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) 	case ALCHEMY_CPU_AU1550:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) 	case ALCHEMY_CPU_AU1200:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) 	case ALCHEMY_CPU_AU1300:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) 	if (usedma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) 		ddma_memid = au1xxx_ddma_add_device(&au1550_spi_mem_dbdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) 		if (!ddma_memid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) 			printk(KERN_ERR "au1550-spi: cannot add memory"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) 					"dbdma device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) 	return platform_driver_register(&au1550_spi_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) module_init(au1550_spi_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) static void __exit au1550_spi_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) 	if (usedma && ddma_memid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) 		au1xxx_ddma_del_device(ddma_memid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) 	platform_driver_unregister(&au1550_spi_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) module_exit(au1550_spi_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) MODULE_DESCRIPTION("Au1550 PSC SPI Driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) MODULE_AUTHOR("Jan Nikitenko <jan.nikitenko@gmail.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) MODULE_LICENSE("GPL");