Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
//
// Driver for AT91 USART Controllers as SPI
//
// Copyright (C) 2018 Microchip Technology Inc.
//
// Author: Radu Pirea <radu.pirea@microchip.com>

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <linux/spi/spi.h>

#define US_CR			0x00
#define US_MR			0x04
#define US_IER			0x08
#define US_IDR			0x0C
#define US_CSR			0x14
#define US_RHR			0x18
#define US_THR			0x1C
#define US_BRGR			0x20
#define US_VERSION		0xFC

#define US_CR_RSTRX		BIT(2)
#define US_CR_RSTTX		BIT(3)
#define US_CR_RXEN		BIT(4)
#define US_CR_RXDIS		BIT(5)
#define US_CR_TXEN		BIT(6)
#define US_CR_TXDIS		BIT(7)

#define US_MR_SPI_MASTER	0x0E
#define US_MR_CHRL		GENMASK(7, 6)
#define US_MR_CPHA		BIT(8)
#define US_MR_CPOL		BIT(16)
#define US_MR_CLKO		BIT(18)
#define US_MR_WRDBT		BIT(20)
#define US_MR_LOOP		BIT(15)

#define US_IR_RXRDY		BIT(0)
#define US_IR_TXRDY		BIT(1)
#define US_IR_OVRE		BIT(5)

#define US_BRGR_SIZE		BIT(16)

#define US_MIN_CLK_DIV		0x06
#define US_MAX_CLK_DIV		BIT(16)

#define US_RESET		(US_CR_RSTRX | US_CR_RSTTX)
#define US_DISABLE		(US_CR_RXDIS | US_CR_TXDIS)
#define US_ENABLE		(US_CR_RXEN | US_CR_TXEN)
#define US_OVRE_RXRDY_IRQS	(US_IR_OVRE | US_IR_RXRDY)

#define US_INIT \
	(US_MR_SPI_MASTER | US_MR_CHRL | US_MR_CLKO | US_MR_WRDBT)
#define US_DMA_MIN_BYTES       16
#define US_DMA_TIMEOUT         (msecs_to_jiffies(1000))

/* Register access macros */
#define at91_usart_spi_readl(port, reg) \
	readl_relaxed((port)->regs + US_##reg)
#define at91_usart_spi_writel(port, reg, value) \
	writel_relaxed((value), (port)->regs + US_##reg)

#define at91_usart_spi_readb(port, reg) \
	readb_relaxed((port)->regs + US_##reg)
#define at91_usart_spi_writeb(port, reg, value) \
	writeb_relaxed((value), (port)->regs + US_##reg)

struct at91_usart_spi {
	struct platform_device  *mpdev;
	struct spi_transfer	*current_transfer;
	void __iomem		*regs;
	struct device		*dev;
	struct clk		*clk;

	struct completion	xfer_completion;

	/*used in interrupt to protect data reading*/
	spinlock_t		lock;

	phys_addr_t		phybase;

	int			irq;
	unsigned int		current_tx_remaining_bytes;
	unsigned int		current_rx_remaining_bytes;

	u32			spi_clk;
	u32			status;

	bool			xfer_failed;
	bool			use_dma;
};

static void dma_callback(void *data)
{
	struct spi_controller   *ctlr = data;
	struct at91_usart_spi   *aus = spi_master_get_devdata(ctlr);

	at91_usart_spi_writel(aus, IER, US_IR_RXRDY);
	aus->current_rx_remaining_bytes = 0;
	complete(&aus->xfer_completion);
}

static bool at91_usart_spi_can_dma(struct spi_controller *ctrl,
				   struct spi_device *spi,
				   struct spi_transfer *xfer)
{
	struct at91_usart_spi *aus = spi_master_get_devdata(ctrl);

	return aus->use_dma && xfer->len >= US_DMA_MIN_BYTES;
}
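
/*
 * Note: DMA is attempted only when both DMA channels were set up successfully
 * at probe time (aus->use_dma) and the transfer is at least US_DMA_MIN_BYTES
 * (16) long; shorter transfers fall back to interrupt-driven PIO.
 */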

static int at91_usart_spi_configure_dma(struct spi_controller *ctlr,
					struct at91_usart_spi *aus)
{
	struct dma_slave_config slave_config;
	struct device *dev = &aus->mpdev->dev;
	phys_addr_t phybase = aus->phybase;
	dma_cap_mask_t mask;
	int err = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	ctlr->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR_OR_NULL(ctlr->dma_tx)) {
		if (IS_ERR(ctlr->dma_tx)) {
			err = PTR_ERR(ctlr->dma_tx);
			goto at91_usart_spi_error_clear;
		}

		dev_dbg(dev,
			"DMA TX channel not available, SPI unable to use DMA\n");
		err = -EBUSY;
		goto at91_usart_spi_error_clear;
	}

	ctlr->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR_OR_NULL(ctlr->dma_rx)) {
		if (IS_ERR(ctlr->dma_rx)) {
			err = PTR_ERR(ctlr->dma_rx);
			goto at91_usart_spi_error;
		}

		dev_dbg(dev,
			"DMA RX channel not available, SPI unable to use DMA\n");
		err = -EBUSY;
		goto at91_usart_spi_error;
	}

	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.dst_addr = (dma_addr_t)phybase + US_THR;
	slave_config.src_addr = (dma_addr_t)phybase + US_RHR;
	slave_config.src_maxburst = 1;
	slave_config.dst_maxburst = 1;
	slave_config.device_fc = false;

	slave_config.direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(ctlr->dma_rx, &slave_config)) {
		dev_err(&ctlr->dev,
			"failed to configure rx dma channel\n");
		err = -EINVAL;
		goto at91_usart_spi_error;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	if (dmaengine_slave_config(ctlr->dma_tx, &slave_config)) {
		dev_err(&ctlr->dev,
			"failed to configure tx dma channel\n");
		err = -EINVAL;
		goto at91_usart_spi_error;
	}

	aus->use_dma = true;
	return 0;

at91_usart_spi_error:
	if (!IS_ERR_OR_NULL(ctlr->dma_tx))
		dma_release_channel(ctlr->dma_tx);
	if (!IS_ERR_OR_NULL(ctlr->dma_rx))
		dma_release_channel(ctlr->dma_rx);
	ctlr->dma_tx = NULL;
	ctlr->dma_rx = NULL;

at91_usart_spi_error_clear:
	return err;
}

static void at91_usart_spi_release_dma(struct spi_controller *ctlr)
{
	if (ctlr->dma_rx)
		dma_release_channel(ctlr->dma_rx);
	if (ctlr->dma_tx)
		dma_release_channel(ctlr->dma_tx);
}

static void at91_usart_spi_stop_dma(struct spi_controller *ctlr)
{
	if (ctlr->dma_rx)
		dmaengine_terminate_all(ctlr->dma_rx);
	if (ctlr->dma_tx)
		dmaengine_terminate_all(ctlr->dma_tx);
}

static int at91_usart_spi_dma_transfer(struct spi_controller *ctlr,
				       struct spi_transfer *xfer)
{
	struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
	struct dma_chan	 *rxchan = ctlr->dma_rx;
	struct dma_chan *txchan = ctlr->dma_tx;
	struct dma_async_tx_descriptor *rxdesc;
	struct dma_async_tx_descriptor *txdesc;
	dma_cookie_t cookie;

	/* Disable RX interrupt */
	at91_usart_spi_writel(aus, IDR, US_IR_RXRDY);

	rxdesc = dmaengine_prep_slave_sg(rxchan,
					 xfer->rx_sg.sgl,
					 xfer->rx_sg.nents,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT |
					 DMA_CTRL_ACK);
	if (!rxdesc)
		goto at91_usart_spi_err_dma;

	txdesc = dmaengine_prep_slave_sg(txchan,
					 xfer->tx_sg.sgl,
					 xfer->tx_sg.nents,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT |
					 DMA_CTRL_ACK);
	if (!txdesc)
		goto at91_usart_spi_err_dma;

	rxdesc->callback = dma_callback;
	rxdesc->callback_param = ctlr;

	cookie = rxdesc->tx_submit(rxdesc);
	if (dma_submit_error(cookie))
		goto at91_usart_spi_err_dma;

	cookie = txdesc->tx_submit(txdesc);
	if (dma_submit_error(cookie))
		goto at91_usart_spi_err_dma;

	rxchan->device->device_issue_pending(rxchan);
	txchan->device->device_issue_pending(txchan);

	return 0;

at91_usart_spi_err_dma:
	/* Enable RX interrupt if something fails and fallback to PIO */
	at91_usart_spi_writel(aus, IER, US_IR_RXRDY);
	at91_usart_spi_stop_dma(ctlr);

	return -ENOMEM;
}

static unsigned long at91_usart_spi_dma_timeout(struct at91_usart_spi *aus)
{
	return wait_for_completion_timeout(&aus->xfer_completion,
					   US_DMA_TIMEOUT);
}

static inline u32 at91_usart_spi_tx_ready(struct at91_usart_spi *aus)
{
	return aus->status & US_IR_TXRDY;
}

static inline u32 at91_usart_spi_rx_ready(struct at91_usart_spi *aus)
{
	return aus->status & US_IR_RXRDY;
}

static inline u32 at91_usart_spi_check_overrun(struct at91_usart_spi *aus)
{
	return aus->status & US_IR_OVRE;
}

static inline u32 at91_usart_spi_read_status(struct at91_usart_spi *aus)
{
	aus->status = at91_usart_spi_readl(aus, CSR);
	return aus->status;
}

static inline void at91_usart_spi_tx(struct at91_usart_spi *aus)
{
	unsigned int len = aus->current_transfer->len;
	unsigned int remaining = aus->current_tx_remaining_bytes;
	const u8  *tx_buf = aus->current_transfer->tx_buf;

	if (!remaining)
		return;

	if (at91_usart_spi_tx_ready(aus)) {
		at91_usart_spi_writeb(aus, THR, tx_buf[len - remaining]);
		aus->current_tx_remaining_bytes--;
	}
}

static inline void at91_usart_spi_rx(struct at91_usart_spi *aus)
{
	int len = aus->current_transfer->len;
	int remaining = aus->current_rx_remaining_bytes;
	u8  *rx_buf = aus->current_transfer->rx_buf;

	if (!remaining)
		return;

	rx_buf[len - remaining] = at91_usart_spi_readb(aus, RHR);
	aus->current_rx_remaining_bytes--;
}

static inline void
at91_usart_spi_set_xfer_speed(struct at91_usart_spi *aus,
			      struct spi_transfer *xfer)
{
	at91_usart_spi_writel(aus, BRGR,
			      DIV_ROUND_UP(aus->spi_clk, xfer->speed_hz));
}
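
/*
 * Worked example (illustrative clock value): with a 132 MHz peripheral clock
 * and a requested speed of 10 MHz, BRGR is written with
 * DIV_ROUND_UP(132000000, 10000000) = 14, so SCK actually runs at
 * 132 MHz / 14 ~= 9.43 MHz. Rounding the divider up guarantees the bus clock
 * never exceeds the requested rate.
 */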

static irqreturn_t at91_usart_spi_interrupt(int irq, void *dev_id)
{
	struct spi_controller *controller = dev_id;
	struct at91_usart_spi *aus = spi_master_get_devdata(controller);

	spin_lock(&aus->lock);
	at91_usart_spi_read_status(aus);

	if (at91_usart_spi_check_overrun(aus)) {
		aus->xfer_failed = true;
		at91_usart_spi_writel(aus, IDR, US_IR_OVRE | US_IR_RXRDY);
		spin_unlock(&aus->lock);
		return IRQ_HANDLED;
	}

	if (at91_usart_spi_rx_ready(aus)) {
		at91_usart_spi_rx(aus);
		spin_unlock(&aus->lock);
		return IRQ_HANDLED;
	}

	spin_unlock(&aus->lock);

	return IRQ_NONE;
}
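
/*
 * PIO data flow: transmit bytes are pushed from the polling loop in
 * at91_usart_spi_transfer_one() below, while received bytes are drained here
 * on each RXRDY interrupt. An overrun (OVRE) marks the transfer as failed and
 * masks further RX interrupts so the polling loop can bail out with -EIO.
 */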

static int at91_usart_spi_setup(struct spi_device *spi)
{
	struct at91_usart_spi *aus = spi_master_get_devdata(spi->controller);
	u32 *ausd = spi->controller_state;
	unsigned int mr = at91_usart_spi_readl(aus, MR);

	if (spi->mode & SPI_CPOL)
		mr |= US_MR_CPOL;
	else
		mr &= ~US_MR_CPOL;

	if (spi->mode & SPI_CPHA)
		mr |= US_MR_CPHA;
	else
		mr &= ~US_MR_CPHA;

	if (spi->mode & SPI_LOOP)
		mr |= US_MR_LOOP;
	else
		mr &= ~US_MR_LOOP;

	if (!ausd) {
		ausd = kzalloc(sizeof(*ausd), GFP_KERNEL);
		if (!ausd)
			return -ENOMEM;

		spi->controller_state = ausd;
	}

	*ausd = mr;

	dev_dbg(&spi->dev,
		"setup: bpw %u mode 0x%x -> mr %d %08x\n",
		spi->bits_per_word, spi->mode, spi->chip_select, mr);

	return 0;
}

static int at91_usart_spi_transfer_one(struct spi_controller *ctlr,
				       struct spi_device *spi,
				       struct spi_transfer *xfer)
{
	struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
	unsigned long dma_timeout = 0;
	int ret = 0;

	at91_usart_spi_set_xfer_speed(aus, xfer);
	aus->xfer_failed = false;
	aus->current_transfer = xfer;
	aus->current_tx_remaining_bytes = xfer->len;
	aus->current_rx_remaining_bytes = xfer->len;

	while ((aus->current_tx_remaining_bytes ||
		aus->current_rx_remaining_bytes) && !aus->xfer_failed) {
		reinit_completion(&aus->xfer_completion);
		if (at91_usart_spi_can_dma(ctlr, spi, xfer) &&
		    !ret) {
			ret = at91_usart_spi_dma_transfer(ctlr, xfer);
			if (ret)
				continue;

			dma_timeout = at91_usart_spi_dma_timeout(aus);

			if (WARN_ON(dma_timeout == 0)) {
				dev_err(&spi->dev, "DMA transfer timeout\n");
				return -EIO;
			}
			aus->current_tx_remaining_bytes = 0;
		} else {
			at91_usart_spi_read_status(aus);
			at91_usart_spi_tx(aus);
		}

		cpu_relax();
	}

	if (aus->xfer_failed) {
		dev_err(aus->dev, "Overrun!\n");
		return -EIO;
	}

	return 0;
}

static int at91_usart_spi_prepare_message(struct spi_controller *ctlr,
					  struct spi_message *message)
{
	struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
	struct spi_device *spi = message->spi;
	u32 *ausd = spi->controller_state;

	at91_usart_spi_writel(aus, CR, US_ENABLE);
	at91_usart_spi_writel(aus, IER, US_OVRE_RXRDY_IRQS);
	at91_usart_spi_writel(aus, MR, *ausd);

	return 0;
}

static int at91_usart_spi_unprepare_message(struct spi_controller *ctlr,
					    struct spi_message *message)
{
	struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);

	at91_usart_spi_writel(aus, CR, US_RESET | US_DISABLE);
	at91_usart_spi_writel(aus, IDR, US_OVRE_RXRDY_IRQS);

	return 0;
}

static void at91_usart_spi_cleanup(struct spi_device *spi)
{
	struct at91_usart_spi_device *ausd = spi->controller_state;

	spi->controller_state = NULL;
	kfree(ausd);
}

static void at91_usart_spi_init(struct at91_usart_spi *aus)
{
	at91_usart_spi_writel(aus, MR, US_INIT);
	at91_usart_spi_writel(aus, CR, US_RESET | US_DISABLE);
}

static int at91_usart_gpio_setup(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.parent->of_node;
	int i;
	int ret;
	int nb;

	if (!np)
		return -EINVAL;

	nb = of_gpio_named_count(np, "cs-gpios");
	for (i = 0; i < nb; i++) {
		int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);

		if (cs_gpio < 0)
			return cs_gpio;

		if (gpio_is_valid(cs_gpio)) {
			ret = devm_gpio_request_one(&pdev->dev, cs_gpio,
						    GPIOF_DIR_OUT,
						    dev_name(&pdev->dev));
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int at91_usart_spi_probe(struct platform_device *pdev)
{
	struct resource *regs;
	struct spi_controller *controller;
	struct at91_usart_spi *aus;
	struct clk *clk;
	int irq;
	int ret;

	regs = platform_get_resource(to_platform_device(pdev->dev.parent),
				     IORESOURCE_MEM, 0);
	if (!regs)
		return -EINVAL;

	irq = platform_get_irq(to_platform_device(pdev->dev.parent), 0);
	if (irq < 0)
		return irq;

	clk = devm_clk_get(pdev->dev.parent, "usart");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = -ENOMEM;
	controller = spi_alloc_master(&pdev->dev, sizeof(*aus));
	if (!controller)
		goto at91_usart_spi_probe_fail;

	ret = at91_usart_gpio_setup(pdev);
	if (ret)
		goto at91_usart_spi_probe_fail;

	controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
	controller->dev.of_node = pdev->dev.parent->of_node;
	controller->bits_per_word_mask = SPI_BPW_MASK(8);
	controller->setup = at91_usart_spi_setup;
	controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
	controller->transfer_one = at91_usart_spi_transfer_one;
	controller->prepare_message = at91_usart_spi_prepare_message;
	controller->unprepare_message = at91_usart_spi_unprepare_message;
	controller->can_dma = at91_usart_spi_can_dma;
	controller->cleanup = at91_usart_spi_cleanup;
	controller->max_speed_hz = DIV_ROUND_UP(clk_get_rate(clk),
						US_MIN_CLK_DIV);
	controller->min_speed_hz = DIV_ROUND_UP(clk_get_rate(clk),
						US_MAX_CLK_DIV);
	platform_set_drvdata(pdev, controller);

	aus = spi_master_get_devdata(controller);

	aus->dev = &pdev->dev;
	aus->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(aus->regs)) {
		ret = PTR_ERR(aus->regs);
		goto at91_usart_spi_probe_fail;
	}

	aus->irq = irq;
	aus->clk = clk;

	ret = devm_request_irq(&pdev->dev, irq, at91_usart_spi_interrupt, 0,
			       dev_name(&pdev->dev), controller);
	if (ret)
		goto at91_usart_spi_probe_fail;

	ret = clk_prepare_enable(clk);
	if (ret)
		goto at91_usart_spi_probe_fail;

	aus->spi_clk = clk_get_rate(clk);
	at91_usart_spi_init(aus);

	aus->phybase = regs->start;

	aus->mpdev = to_platform_device(pdev->dev.parent);

	ret = at91_usart_spi_configure_dma(controller, aus);
	if (ret)
		goto at91_usart_fail_dma;

	spin_lock_init(&aus->lock);
	init_completion(&aus->xfer_completion);

	ret = devm_spi_register_master(&pdev->dev, controller);
	if (ret)
		goto at91_usart_fail_register_master;

	dev_info(&pdev->dev,
		 "AT91 USART SPI Controller version 0x%x at %pa (irq %d)\n",
		 at91_usart_spi_readl(aus, VERSION),
		 &regs->start, irq);

	return 0;

at91_usart_fail_register_master:
	at91_usart_spi_release_dma(controller);
at91_usart_fail_dma:
	clk_disable_unprepare(clk);
at91_usart_spi_probe_fail:
	spi_master_put(controller);
	return ret;
}

__maybe_unused static int at91_usart_spi_runtime_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);

	clk_disable_unprepare(aus->clk);
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

__maybe_unused static int at91_usart_spi_runtime_resume(struct device *dev)
{
	struct spi_controller *ctrl = dev_get_drvdata(dev);
	struct at91_usart_spi *aus = spi_master_get_devdata(ctrl);

	pinctrl_pm_select_default_state(dev);

	return clk_prepare_enable(aus->clk);
}

__maybe_unused static int at91_usart_spi_suspend(struct device *dev)
{
	struct spi_controller *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = spi_controller_suspend(ctrl);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev))
		at91_usart_spi_runtime_suspend(dev);

	return 0;
}

__maybe_unused static int at91_usart_spi_resume(struct device *dev)
{
	struct spi_controller *ctrl = dev_get_drvdata(dev);
	struct at91_usart_spi *aus = spi_master_get_devdata(ctrl);
	int ret;

	if (!pm_runtime_suspended(dev)) {
		ret = at91_usart_spi_runtime_resume(dev);
		if (ret)
			return ret;
	}

	at91_usart_spi_init(aus);

	return spi_controller_resume(ctrl);
}

static int at91_usart_spi_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = platform_get_drvdata(pdev);
	struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);

	at91_usart_spi_release_dma(ctlr);
	clk_disable_unprepare(aus->clk);

	return 0;
}

static const struct dev_pm_ops at91_usart_spi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(at91_usart_spi_suspend, at91_usart_spi_resume)
	SET_RUNTIME_PM_OPS(at91_usart_spi_runtime_suspend,
			   at91_usart_spi_runtime_resume, NULL)
};

static struct platform_driver at91_usart_spi_driver = {
	.driver = {
		.name = "at91_usart_spi",
		.pm = &at91_usart_spi_pm_ops,
	},
	.probe = at91_usart_spi_probe,
	.remove = at91_usart_spi_remove,
};

module_platform_driver(at91_usart_spi_driver);

MODULE_DESCRIPTION("Microchip AT91 USART SPI Controller driver");
MODULE_AUTHOR("Radu Pirea <radu.pirea@microchip.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:at91_usart_spi");
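
For reference, a client driver bound to a chip select on this controller talks to it through the generic SPI core API; the controller above only accepts 8 bits per word (SPI_BPW_MASK(8)) and always runs full duplex (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX). A minimal sketch follows; the helper name, command byte, and response layout are hypothetical and purely for illustration.

#include <linux/spi/spi.h>

/* Hypothetical helper: clock out one command byte and read one status byte
 * back in the same full-duplex transfer (8 bits per word, as enforced by the
 * controller above).
 */
static int example_read_status(struct spi_device *spi, u8 cmd, u8 *status)
{
	u8 tx[2] = { cmd, 0x00 };
	u8 rx[2] = { 0 };
	struct spi_transfer xfer = {
		.tx_buf	= tx,
		.rx_buf	= rx,
		.len	= sizeof(tx),
	};
	int ret;

	ret = spi_sync_transfer(spi, &xfer, 1);
	if (ret)
		return ret;

	*status = rx[1];
	return 0;
}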