^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Driver for Broadcom BCM2835 SPI Controllers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2012 Chris Boot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (C) 2013 Stephen Warren
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright (C) 2015 Martin Sperl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * This driver is inspired by:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * spi-ath79.c, Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * spi-atmel.c, Copyright (C) 2006 Atmel Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/completion.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/debugfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/dmaengine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/gpio/consumer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/gpio/machine.h> /* FIXME: using chip internals */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/gpio/driver.h> /* FIXME: using chip internals */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/of_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/spi/spi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) /* SPI register offsets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define BCM2835_SPI_CS 0x00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define BCM2835_SPI_FIFO 0x04
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define BCM2835_SPI_CLK 0x08
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define BCM2835_SPI_DLEN 0x0c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define BCM2835_SPI_LTOH 0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define BCM2835_SPI_DC 0x14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) /* Bitfields in CS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define BCM2835_SPI_CS_LEN_LONG 0x02000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #define BCM2835_SPI_CS_DMA_LEN 0x01000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define BCM2835_SPI_CS_CSPOL2 0x00800000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #define BCM2835_SPI_CS_CSPOL1 0x00400000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define BCM2835_SPI_CS_CSPOL0 0x00200000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define BCM2835_SPI_CS_RXF 0x00100000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define BCM2835_SPI_CS_RXR 0x00080000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define BCM2835_SPI_CS_TXD 0x00040000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define BCM2835_SPI_CS_RXD 0x00020000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define BCM2835_SPI_CS_DONE 0x00010000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #define BCM2835_SPI_CS_LEN 0x00002000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #define BCM2835_SPI_CS_REN 0x00001000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define BCM2835_SPI_CS_ADCS 0x00000800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #define BCM2835_SPI_CS_INTR 0x00000400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #define BCM2835_SPI_CS_INTD 0x00000200
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #define BCM2835_SPI_CS_DMAEN 0x00000100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #define BCM2835_SPI_CS_TA 0x00000080
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #define BCM2835_SPI_CS_CSPOL 0x00000040
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #define BCM2835_SPI_CS_CLEAR_RX 0x00000020
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #define BCM2835_SPI_CS_CLEAR_TX 0x00000010
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #define BCM2835_SPI_CS_CPOL 0x00000008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #define BCM2835_SPI_CS_CPHA 0x00000004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define BCM2835_SPI_CS_CS_10 0x00000002
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #define BCM2835_SPI_CS_CS_01 0x00000001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #define BCM2835_SPI_FIFO_SIZE 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #define BCM2835_SPI_FIFO_SIZE_3_4 48
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #define BCM2835_SPI_DMA_MIN_LENGTH 96
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) #define BCM2835_SPI_NUM_CS 24 /* raise as necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) #define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) | SPI_NO_CS | SPI_3WIRE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) #define DRV_NAME "spi-bcm2835"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) /* define polling limits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) static unsigned int polling_limit_us = 30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) module_param(polling_limit_us, uint, 0664);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) MODULE_PARM_DESC(polling_limit_us,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) "time in us to run a transfer in polling mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
/**
 * struct bcm2835_spi - BCM2835 SPI controller
 * @regs: base address of register map
 * @clk: core clock, divided to calculate serial clock
 * @clk_hz: core clock cached speed
 * @irq: interrupt, signals TX FIFO empty or RX FIFO ¾ full
 * @tfr: SPI transfer currently processed
 * @ctlr: SPI controller reverse lookup
 * @tx_buf: pointer whence next transmitted byte is read
 * @rx_buf: pointer where next received byte is written
 * @tx_len: remaining bytes to transmit
 * @rx_len: remaining bytes to receive
 * @tx_prologue: bytes transmitted without DMA if first TX sglist entry's
 *	length is not a multiple of 4 (to overcome hardware limitation)
 * @rx_prologue: bytes received without DMA if first RX sglist entry's
 *	length is not a multiple of 4 (to overcome hardware limitation)
 * @tx_spillover: whether @tx_prologue spills over to second TX sglist entry
 * @prepare_cs: precalculated CS register value for ->prepare_message()
 *	(uses slave-specific clock polarity and phase settings)
 * @debugfs_dir: the debugfs directory - needed to remove debugfs when
 *	unloading the module
 * @count_transfer_polling: count of how often polling mode is used
 * @count_transfer_irq: count of how often interrupt mode is used
 * @count_transfer_irq_after_polling: count of how often we fall back to
 *	interrupt mode after starting in polling mode.
 *	These are counted as well in @count_transfer_polling and
 *	@count_transfer_irq
 * @count_transfer_dma: count how often dma mode is used
 * @chip_select: SPI slave currently selected
 *	(used by bcm2835_spi_dma_tx_done() to write @clear_rx_cs)
 * @tx_dma_active: whether a TX DMA descriptor is in progress
 * @rx_dma_active: whether a RX DMA descriptor is in progress
 *	(used by bcm2835_spi_dma_tx_done() to handle a race)
 * @fill_tx_desc: preallocated TX DMA descriptor used for RX-only transfers
 *	(cyclically copies from zero page to TX FIFO)
 * @fill_tx_addr: bus address of zero page
 * @clear_rx_desc: preallocated RX DMA descriptor used for TX-only transfers
 *	(cyclically clears RX FIFO by writing @clear_rx_cs to CS register)
 * @clear_rx_addr: bus address of @clear_rx_cs
 * @clear_rx_cs: precalculated CS register value to clear RX FIFO
 *	(uses slave-specific clock polarity and phase settings)
 */
struct bcm2835_spi {
	void __iomem *regs;
	struct clk *clk;
	unsigned long clk_hz;
	int irq;
	struct spi_transfer *tfr;
	struct spi_controller *ctlr;
	const u8 *tx_buf;
	u8 *rx_buf;
	int tx_len;
	int rx_len;
	int tx_prologue;
	int rx_prologue;
	unsigned int tx_spillover;
	u32 prepare_cs[BCM2835_SPI_NUM_CS];

	struct dentry *debugfs_dir;
	u64 count_transfer_polling;
	u64 count_transfer_irq;
	u64 count_transfer_irq_after_polling;
	u64 count_transfer_dma;

	u8 chip_select;
	unsigned int tx_dma_active;
	unsigned int rx_dma_active;
	struct dma_async_tx_descriptor *fill_tx_desc;
	dma_addr_t fill_tx_addr;
	struct dma_async_tx_descriptor *clear_rx_desc[BCM2835_SPI_NUM_CS];
	dma_addr_t clear_rx_addr;
	u32 clear_rx_cs[BCM2835_SPI_NUM_CS] ____cacheline_aligned;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) #if defined(CONFIG_DEBUG_FS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) const char *dname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) char name[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) struct dentry *dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) /* get full name */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) snprintf(name, sizeof(name), "spi-bcm2835-%s", dname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) /* the base directory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) dir = debugfs_create_dir(name, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) bs->debugfs_dir = dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) /* the counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) debugfs_create_u64("count_transfer_polling", 0444, dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) &bs->count_transfer_polling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) debugfs_create_u64("count_transfer_irq", 0444, dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) &bs->count_transfer_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) debugfs_create_u64("count_transfer_irq_after_polling", 0444, dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) &bs->count_transfer_irq_after_polling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) debugfs_create_u64("count_transfer_dma", 0444, dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) &bs->count_transfer_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
/* Remove the debugfs directory (and the counter files inside it). */
static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
{
	debugfs_remove_recursive(bs->debugfs_dir);
	bs->debugfs_dir = NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) #else
/* Stubs for builds without CONFIG_DEBUG_FS: debugfs support compiles away. */
static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
				   const char *dname)
{
}

static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) #endif /* CONFIG_DEBUG_FS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
/* Read a 32-bit controller register at byte offset @reg. */
static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned int reg)
{
	return readl(bs->regs + reg);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
/* Write 32-bit @val to the controller register at byte offset @reg. */
static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned int reg, u32 val)
{
	writel(val, bs->regs + reg);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) u8 byte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) while ((bs->rx_len) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_RXD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) byte = bcm2835_rd(bs, BCM2835_SPI_FIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) if (bs->rx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) *bs->rx_buf++ = byte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) bs->rx_len--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) u8 byte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) while ((bs->tx_len) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_TXD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) byte = bs->tx_buf ? *bs->tx_buf++ : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) bcm2835_wr(bs, BCM2835_SPI_FIFO, byte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) bs->tx_len--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) * bcm2835_rd_fifo_count() - blindly read exactly @count bytes from RX FIFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) * @bs: BCM2835 SPI controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) * @count: bytes to read from RX FIFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) * The caller must ensure that @bs->rx_len is greater than or equal to @count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) * that the RX FIFO contains at least @count bytes and that the DMA Enable flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) * in the CS register is set (such that a read from the FIFO register receives
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) * 32-bit instead of just 8-bit). Moreover @bs->rx_buf must not be %NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) static inline void bcm2835_rd_fifo_count(struct bcm2835_spi *bs, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) bs->rx_len -= count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) len = min(count, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) memcpy(bs->rx_buf, &val, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) bs->rx_buf += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) count -= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) } while (count > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) * bcm2835_wr_fifo_count() - blindly write exactly @count bytes to TX FIFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) * @bs: BCM2835 SPI controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) * @count: bytes to write to TX FIFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) * The caller must ensure that @bs->tx_len is greater than or equal to @count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) * that the TX FIFO can accommodate @count bytes and that the DMA Enable flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) * in the CS register is set (such that a write to the FIFO register transmits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) * 32-bit instead of just 8-bit).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) static inline void bcm2835_wr_fifo_count(struct bcm2835_spi *bs, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) bs->tx_len -= count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) if (bs->tx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) len = min(count, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) memcpy(&val, bs->tx_buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) bs->tx_buf += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) count -= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) } while (count > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)
/**
 * bcm2835_wait_tx_fifo_empty() - busy-wait for TX FIFO to empty
 * @bs: BCM2835 SPI controller
 *
 * The caller must ensure that the RX FIFO can accommodate as many bytes
 * as have been written to the TX FIFO: Transmission is halted once the
 * RX FIFO is full, causing this function to spin forever.
 */
static inline void bcm2835_wait_tx_fifo_empty(struct bcm2835_spi *bs)
{
	/* DONE is set by the hardware once the TX FIFO has drained */
	while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
		cpu_relax();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) * bcm2835_rd_fifo_blind() - blindly read up to @count bytes from RX FIFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) * @bs: BCM2835 SPI controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) * @count: bytes available for reading in RX FIFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) static inline void bcm2835_rd_fifo_blind(struct bcm2835_spi *bs, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) count = min(count, bs->rx_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) bs->rx_len -= count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) if (bs->rx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) *bs->rx_buf++ = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) } while (--count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) * bcm2835_wr_fifo_blind() - blindly write up to @count bytes to TX FIFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) * @bs: BCM2835 SPI controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) * @count: bytes available for writing in TX FIFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) static inline void bcm2835_wr_fifo_blind(struct bcm2835_spi *bs, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) count = min(count, bs->tx_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) bs->tx_len -= count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) val = bs->tx_buf ? *bs->tx_buf++ : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) } while (--count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) static void bcm2835_spi_reset_hw(struct bcm2835_spi *bs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) /* Disable SPI interrupts and transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) cs &= ~(BCM2835_SPI_CS_INTR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) BCM2835_SPI_CS_INTD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) BCM2835_SPI_CS_DMAEN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) BCM2835_SPI_CS_TA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) * Transmission sometimes breaks unless the DONE bit is written at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) * end of every transfer. The spec says it's a RO bit. Either the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) * spec is wrong and the bit is actually of type RW1C, or it's a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) * hardware erratum.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) cs |= BCM2835_SPI_CS_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) /* and reset RX/TX FIFOS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) /* and reset the SPI_HW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) bcm2835_wr(bs, BCM2835_SPI_CS, cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) /* as well as DLEN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) bcm2835_wr(bs, BCM2835_SPI_DLEN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) struct bcm2835_spi *bs = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) * An interrupt is signaled either if DONE is set (TX FIFO empty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) * or if RXR is set (RX FIFO >= ¾ full).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) if (cs & BCM2835_SPI_CS_RXF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) else if (cs & BCM2835_SPI_CS_RXR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE_3_4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) if (bs->tx_len && cs & BCM2835_SPI_CS_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) /* Read as many bytes as possible from FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) bcm2835_rd_fifo(bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) /* Write as many bytes as possible to FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) bcm2835_wr_fifo(bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) if (!bs->rx_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) /* Transfer complete - reset SPI HW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) bcm2835_spi_reset_hw(bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) /* wake up the framework */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) complete(&bs->ctlr->xfer_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) static int bcm2835_spi_transfer_one_irq(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) struct spi_device *spi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) struct spi_transfer *tfr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) u32 cs, bool fifo_empty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) /* update usage statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) bs->count_transfer_irq++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) * Enable HW block, but with interrupts still disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) * Otherwise the empty TX FIFO would immediately trigger an interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) /* fill TX FIFO as much as possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) if (fifo_empty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) bcm2835_wr_fifo(bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) /* enable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) cs |= BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) bcm2835_wr(bs, BCM2835_SPI_CS, cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) /* signal that we need to wait for completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) * bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) * @ctlr: SPI master controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) * @tfr: SPI transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) * @bs: BCM2835 SPI controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) * @cs: CS register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) * A limitation in DMA mode is that the FIFO must be accessed in 4 byte chunks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) * Only the final write access is permitted to transmit less than 4 bytes, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) * SPI controller deduces its intended size from the DLEN register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) * If a TX or RX sglist contains multiple entries, one per page, and the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) * entry starts in the middle of a page, that first entry's length may not be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) * a multiple of 4. Subsequent entries are fine because they span an entire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) * page, hence do have a length that's a multiple of 4.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) * This cannot happen with kmalloc'ed buffers (which is what most clients use)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) * because they are contiguous in physical memory and therefore not split on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) * page boundaries by spi_map_buf(). But it *can* happen with vmalloc'ed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) * buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) * The DMA engine is incapable of combining sglist entries into a continuous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) * stream of 4 byte chunks, it treats every entry separately: A TX entry is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) * rounded up a to a multiple of 4 bytes by transmitting surplus bytes, an RX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) * entry is rounded up by throwing away received bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) * Overcome this limitation by transferring the first few bytes without DMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) * E.g. if the first TX sglist entry's length is 23 and the first RX's is 42,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) * write 3 bytes to the TX FIFO but read only 2 bytes from the RX FIFO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) * The residue of 1 byte in the RX FIFO is picked up by DMA. Together with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) * the rest of the first RX sglist entry it makes up a multiple of 4 bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) * Should the RX prologue be larger, say, 3 vis-à-vis a TX prologue of 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) * write 1 + 4 = 5 bytes to the TX FIFO and read 3 bytes from the RX FIFO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) * Caution, the additional 4 bytes spill over to the second TX sglist entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) * if the length of the first is *exactly* 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) * At most 6 bytes are written and at most 3 bytes read. Do we know the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) * transfer has this many bytes? Yes, see BCM2835_SPI_DMA_MIN_LENGTH.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) * The FIFO is normally accessed with 8-bit width by the CPU and 32-bit width
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) * by the DMA engine. Toggling the DMA Enable flag in the CS register switches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) * the width but also garbles the FIFO's contents. The prologue must therefore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) * be transmitted in 32-bit width to ensure that the following DMA transfer can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) * pick up the residue in the RX FIFO in ungarbled form.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) struct spi_transfer *tfr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) struct bcm2835_spi *bs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) u32 cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) int tx_remaining;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) bs->tfr = tfr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) bs->tx_prologue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) bs->rx_prologue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) bs->tx_spillover = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) if (bs->tx_buf && !sg_is_last(&tfr->tx_sg.sgl[0]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) bs->tx_prologue = sg_dma_len(&tfr->tx_sg.sgl[0]) & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) if (bs->rx_buf && !sg_is_last(&tfr->rx_sg.sgl[0])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) bs->rx_prologue = sg_dma_len(&tfr->rx_sg.sgl[0]) & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) if (bs->rx_prologue > bs->tx_prologue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) if (!bs->tx_buf || sg_is_last(&tfr->tx_sg.sgl[0])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) bs->tx_prologue = bs->rx_prologue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) bs->tx_prologue += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) bs->tx_spillover =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) !(sg_dma_len(&tfr->tx_sg.sgl[0]) & ~3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) /* rx_prologue > 0 implies tx_prologue > 0, so check only the latter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) if (!bs->tx_prologue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) /* Write and read RX prologue. Adjust first entry in RX sglist. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) if (bs->rx_prologue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->rx_prologue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) | BCM2835_SPI_CS_DMAEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) bcm2835_wr_fifo_count(bs, bs->rx_prologue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) bcm2835_wait_tx_fifo_empty(bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) bcm2835_rd_fifo_count(bs, bs->rx_prologue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_RX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) | BCM2835_SPI_CS_CLEAR_TX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) | BCM2835_SPI_CS_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) dma_sync_single_for_device(ctlr->dma_rx->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) sg_dma_address(&tfr->rx_sg.sgl[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) bs->rx_prologue, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) sg_dma_address(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) sg_dma_len(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) if (!bs->tx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) * Write remaining TX prologue. Adjust first entry in TX sglist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) * Also adjust second entry if prologue spills over to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) tx_remaining = bs->tx_prologue - bs->rx_prologue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) if (tx_remaining) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) bcm2835_wr(bs, BCM2835_SPI_DLEN, tx_remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) | BCM2835_SPI_CS_DMAEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) bcm2835_wr_fifo_count(bs, tx_remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) bcm2835_wait_tx_fifo_empty(bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_TX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) | BCM2835_SPI_CS_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) if (likely(!bs->tx_spillover)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) sg_dma_address(&tfr->tx_sg.sgl[0]) += bs->tx_prologue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) sg_dma_len(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) sg_dma_len(&tfr->tx_sg.sgl[0]) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) sg_dma_address(&tfr->tx_sg.sgl[1]) += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) sg_dma_len(&tfr->tx_sg.sgl[1]) -= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) * bcm2835_spi_undo_prologue() - reconstruct original sglist state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) * @bs: BCM2835 SPI controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) * Undo changes which were made to an SPI transfer's sglist when transmitting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) * the prologue. This is necessary to ensure the same memory ranges are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) * unmapped that were originally mapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) static void bcm2835_spi_undo_prologue(struct bcm2835_spi *bs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) struct spi_transfer *tfr = bs->tfr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) if (!bs->tx_prologue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) if (bs->rx_prologue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) sg_dma_address(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) sg_dma_len(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) if (!bs->tx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) if (likely(!bs->tx_spillover)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) sg_dma_address(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) sg_dma_len(&tfr->tx_sg.sgl[0]) += bs->tx_prologue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) sg_dma_len(&tfr->tx_sg.sgl[0]) = bs->tx_prologue - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) sg_dma_address(&tfr->tx_sg.sgl[1]) -= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) sg_dma_len(&tfr->tx_sg.sgl[1]) += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) bs->tx_prologue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) * bcm2835_spi_dma_rx_done() - callback for DMA RX channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) * @data: SPI master controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) * Used for bidirectional and RX-only transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) static void bcm2835_spi_dma_rx_done(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) struct spi_controller *ctlr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) /* terminate tx-dma as we do not have an irq for it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) * because when the rx dma will terminate and this callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) * is called the tx-dma must have finished - can't get to this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) * situation otherwise...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) dmaengine_terminate_async(ctlr->dma_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) bs->tx_dma_active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) bs->rx_dma_active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) bcm2835_spi_undo_prologue(bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) /* reset fifo and HW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) bcm2835_spi_reset_hw(bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) /* and mark as completed */;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) complete(&ctlr->xfer_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) * bcm2835_spi_dma_tx_done() - callback for DMA TX channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) * @data: SPI master controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) * Used for TX-only transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) static void bcm2835_spi_dma_tx_done(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) struct spi_controller *ctlr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) /* busy-wait for TX FIFO to empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) bcm2835_wr(bs, BCM2835_SPI_CS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) bs->clear_rx_cs[bs->chip_select]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) bs->tx_dma_active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) * In case of a very short transfer, RX DMA may not have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) * issued yet. The onus is then on bcm2835_spi_transfer_one_dma()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) * to terminate it immediately after issuing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) if (cmpxchg(&bs->rx_dma_active, true, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) dmaengine_terminate_async(ctlr->dma_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) bcm2835_spi_undo_prologue(bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) bcm2835_spi_reset_hw(bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) complete(&ctlr->xfer_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) * bcm2835_spi_prepare_sg() - prepare and submit DMA descriptor for sglist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) * @ctlr: SPI master controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) * @spi: SPI slave
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) * @tfr: SPI transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) * @bs: BCM2835 SPI controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) * @is_tx: whether to submit DMA descriptor for TX or RX sglist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) * Prepare and submit a DMA descriptor for the TX or RX sglist of @tfr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) * Return 0 on success or a negative error number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) struct spi_device *spi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) struct spi_transfer *tfr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) struct bcm2835_spi *bs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) bool is_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) struct scatterlist *sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) unsigned int nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) enum dma_transfer_direction dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) struct dma_async_tx_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) dma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) if (is_tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) dir = DMA_MEM_TO_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) chan = ctlr->dma_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) nents = tfr->tx_sg.nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) sgl = tfr->tx_sg.sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) flags = tfr->rx_buf ? 0 : DMA_PREP_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) dir = DMA_DEV_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) chan = ctlr->dma_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) nents = tfr->rx_sg.nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) sgl = tfr->rx_sg.sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) flags = DMA_PREP_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) /* prepare the channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) * Completion is signaled by the RX channel for bidirectional and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) * RX-only transfers; else by the TX channel for TX-only transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) if (!is_tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) desc->callback = bcm2835_spi_dma_rx_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) desc->callback_param = ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) } else if (!tfr->rx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) desc->callback = bcm2835_spi_dma_tx_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) desc->callback_param = ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) bs->chip_select = spi->chip_select;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) /* submit it to DMA-engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) cookie = dmaengine_submit(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) return dma_submit_error(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) * bcm2835_spi_transfer_one_dma() - perform SPI transfer using DMA engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) * @ctlr: SPI master controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) * @spi: SPI slave
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) * @tfr: SPI transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) * @cs: CS register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) * For *bidirectional* transfers (both tx_buf and rx_buf are non-%NULL), set up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) * the TX and RX DMA channel to copy between memory and FIFO register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) * For *TX-only* transfers (rx_buf is %NULL), copying the RX FIFO's contents to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) * memory is pointless. However not reading the RX FIFO isn't an option either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) * because transmission is halted once it's full. As a workaround, cyclically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) * clear the RX FIFO by setting the CLEAR_RX bit in the CS register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) * The CS register value is precalculated in bcm2835_spi_setup(). Normally
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) * this is called only once, on slave registration. A DMA descriptor to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) * this value is preallocated in bcm2835_dma_init(). All that's left to do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) * when performing a TX-only transfer is to submit this descriptor to the RX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) * DMA channel. Latency is thereby minimized. The descriptor does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) * generate any interrupts while running. It must be terminated once the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) * TX DMA channel is done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) * Clearing the RX FIFO is paced by the DREQ signal. The signal is asserted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) * when the RX FIFO becomes half full, i.e. 32 bytes. (Tuneable with the DC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) * register.) Reading 32 bytes from the RX FIFO would normally require 8 bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) * accesses, whereas clearing it requires only 1 bus access. So an 8-fold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) * reduction in bus traffic and thus energy consumption is achieved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) * For *RX-only* transfers (tx_buf is %NULL), fill the TX FIFO by cyclically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) * copying from the zero page. The DMA descriptor to do this is preallocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) * in bcm2835_dma_init(). It must be terminated once the RX DMA channel is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) * done and can then be reused.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) * The BCM2835 DMA driver autodetects when a transaction copies from the zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) * page and utilizes the DMA controller's ability to synthesize zeroes instead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) * of copying them from memory. This reduces traffic on the memory bus. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) * feature is not available on so-called "lite" channels, but normally TX DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) * is backed by a full-featured channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) * Zero-filling the TX FIFO is paced by the DREQ signal. Unfortunately the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) * BCM2835 SPI controller continues to assert DREQ even after the DLEN register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * has been counted down to zero (hardware erratum). Thus, when the transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) * has finished, the DMA engine zero-fills the TX FIFO until it is half full.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) * (Tuneable with the DC register.) So up to 9 gratuitous bus accesses are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) * performed at the end of an RX-only transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) struct spi_device *spi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) struct spi_transfer *tfr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) u32 cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) dma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) /* update usage statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) bs->count_transfer_dma++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) * Transfer first few bytes without DMA if length of first TX or RX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) * sglist entry is not a multiple of 4 bytes (hardware limitation).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) bcm2835_spi_transfer_prologue(ctlr, tfr, bs, cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) /* setup tx-DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (bs->tx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) ret = bcm2835_spi_prepare_sg(ctlr, spi, tfr, bs, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) cookie = dmaengine_submit(bs->fill_tx_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) ret = dma_submit_error(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) goto err_reset_hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) /* set the DMA length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->tx_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) /* start the HW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) bcm2835_wr(bs, BCM2835_SPI_CS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) cs | BCM2835_SPI_CS_TA | BCM2835_SPI_CS_DMAEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) bs->tx_dma_active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) /* start TX early */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) dma_async_issue_pending(ctlr->dma_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) /* setup rx-DMA late - to run transfers while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * mapping of the rx buffers still takes place
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * this saves 10us or more.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) if (bs->rx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) ret = bcm2835_spi_prepare_sg(ctlr, spi, tfr, bs, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) cookie = dmaengine_submit(bs->clear_rx_desc[spi->chip_select]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) ret = dma_submit_error(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) /* need to reset on errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) dmaengine_terminate_sync(ctlr->dma_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) bs->tx_dma_active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) goto err_reset_hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) /* start rx dma late */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) dma_async_issue_pending(ctlr->dma_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) bs->rx_dma_active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * In case of a very short TX-only transfer, bcm2835_spi_dma_tx_done()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * may run before RX DMA is issued. Terminate RX DMA if so.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (!bs->rx_buf && !bs->tx_dma_active &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) cmpxchg(&bs->rx_dma_active, true, false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) dmaengine_terminate_async(ctlr->dma_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) bcm2835_spi_reset_hw(bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) /* wait for wakeup in framework */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) err_reset_hw:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) bcm2835_spi_reset_hw(bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) bcm2835_spi_undo_prologue(bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) static bool bcm2835_spi_can_dma(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) struct spi_device *spi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) struct spi_transfer *tfr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) /* we start DMA efforts only on bigger transfers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (tfr->len < BCM2835_SPI_DMA_MIN_LENGTH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) /* return OK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) static void bcm2835_dma_release(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) struct bcm2835_spi *bs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (ctlr->dma_tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) dmaengine_terminate_sync(ctlr->dma_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (bs->fill_tx_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) dmaengine_desc_free(bs->fill_tx_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (bs->fill_tx_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) dma_unmap_page_attrs(ctlr->dma_tx->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) bs->fill_tx_addr, sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) DMA_TO_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) DMA_ATTR_SKIP_CPU_SYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) dma_release_channel(ctlr->dma_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) ctlr->dma_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (ctlr->dma_rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) dmaengine_terminate_sync(ctlr->dma_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) for (i = 0; i < BCM2835_SPI_NUM_CS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (bs->clear_rx_desc[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) dmaengine_desc_free(bs->clear_rx_desc[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (bs->clear_rx_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) dma_unmap_single(ctlr->dma_rx->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) bs->clear_rx_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) sizeof(bs->clear_rx_cs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) dma_release_channel(ctlr->dma_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) ctlr->dma_rx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) struct bcm2835_spi *bs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) struct dma_slave_config slave_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) const __be32 *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) dma_addr_t dma_reg_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) /* base address in dma-space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (!addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) dev_err(dev, "could not get DMA-register address - not using dma mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) /* Fall back to interrupt mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) dma_reg_base = be32_to_cpup(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) /* get tx/rx dma */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) ctlr->dma_tx = dma_request_chan(dev, "tx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (IS_ERR(ctlr->dma_tx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) ret = PTR_ERR(ctlr->dma_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) ctlr->dma_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) ctlr->dma_rx = dma_request_chan(dev, "rx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (IS_ERR(ctlr->dma_rx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) ret = PTR_ERR(ctlr->dma_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) ctlr->dma_rx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) goto err_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * The TX DMA channel either copies a transfer's TX buffer to the FIFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * or, in case of an RX-only transfer, cyclically copies from the zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * page to the FIFO using a preallocated, reusable descriptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) ret = dmaengine_slave_config(ctlr->dma_tx, &slave_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) goto err_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) bs->fill_tx_addr = dma_map_page_attrs(ctlr->dma_tx->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) ZERO_PAGE(0), 0, sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) DMA_TO_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) DMA_ATTR_SKIP_CPU_SYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (dma_mapping_error(ctlr->dma_tx->device->dev, bs->fill_tx_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) dev_err(dev, "cannot map zero page - not using DMA mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) bs->fill_tx_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) goto err_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) bs->fill_tx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) bs->fill_tx_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) sizeof(u32), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) DMA_MEM_TO_DEV, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (!bs->fill_tx_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) dev_err(dev, "cannot prepare fill_tx_desc - not using DMA mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) goto err_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) ret = dmaengine_desc_set_reuse(bs->fill_tx_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) dev_err(dev, "cannot reuse fill_tx_desc - not using DMA mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) goto err_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * The RX DMA channel is used bidirectionally: It either reads the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * RX FIFO or, in case of a TX-only transfer, cyclically writes a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * precalculated value to the CS register to clear the RX FIFO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_CS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) ret = dmaengine_slave_config(ctlr->dma_rx, &slave_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) goto err_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) bs->clear_rx_addr = dma_map_single(ctlr->dma_rx->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) bs->clear_rx_cs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) sizeof(bs->clear_rx_cs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (dma_mapping_error(ctlr->dma_rx->device->dev, bs->clear_rx_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) dev_err(dev, "cannot map clear_rx_cs - not using DMA mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) bs->clear_rx_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) goto err_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) for (i = 0; i < BCM2835_SPI_NUM_CS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) bs->clear_rx_desc[i] = dmaengine_prep_dma_cyclic(ctlr->dma_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) bs->clear_rx_addr + i * sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) sizeof(u32), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) DMA_MEM_TO_DEV, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (!bs->clear_rx_desc[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) dev_err(dev, "cannot prepare clear_rx_desc - not using DMA mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) goto err_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) ret = dmaengine_desc_set_reuse(bs->clear_rx_desc[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) dev_err(dev, "cannot reuse clear_rx_desc - not using DMA mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) goto err_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) /* all went well, so set can_dma */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) ctlr->can_dma = bcm2835_spi_can_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) err_config:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) err_release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) bcm2835_dma_release(ctlr, bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) * Only report error for deferred probing, otherwise fall back to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * interrupt mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (ret != -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) struct spi_device *spi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) struct spi_transfer *tfr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) u32 cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) /* update usage statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) bs->count_transfer_polling++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) /* enable HW block without interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) /* fill in the fifo before timeout calculations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * if we are interrupted here, then the data is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * getting transferred by the HW while we are interrupted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) /* set the timeout to at least 2 jiffies */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) timeout = jiffies + 2 + HZ * polling_limit_us / 1000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) /* loop until finished the transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) while (bs->rx_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) /* fill in tx fifo with remaining data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) bcm2835_wr_fifo(bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) /* read from fifo as much as possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) bcm2835_rd_fifo(bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) /* if there is still data pending to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) * then check the timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) if (bs->rx_len && time_after(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) dev_dbg_ratelimited(&spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) "timeout period reached: jiffies: %lu remaining tx/rx: %d/%d - falling back to interrupt mode\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) jiffies - timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) bs->tx_len, bs->rx_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) /* fall back to interrupt mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) /* update usage statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) bs->count_transfer_irq_after_polling++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) return bcm2835_spi_transfer_one_irq(ctlr, spi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) tfr, cs, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) /* Transfer complete - reset SPI HW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) bcm2835_spi_reset_hw(bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) /* and return without waiting for completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) struct spi_device *spi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) struct spi_transfer *tfr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) unsigned long spi_hz, cdiv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) unsigned long hz_per_byte, byte_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) u32 cs = bs->prepare_cs[spi->chip_select];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /* set clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) spi_hz = tfr->speed_hz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) if (spi_hz >= bs->clk_hz / 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) cdiv = 2; /* clk_hz/2 is the fastest we can go */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) } else if (spi_hz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) /* CDIV must be a multiple of two */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) cdiv = DIV_ROUND_UP(bs->clk_hz, spi_hz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) cdiv += (cdiv % 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (cdiv >= 65536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) cdiv = 0; /* 0 is the slowest we can go */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) cdiv = 0; /* 0 is the slowest we can go */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) tfr->effective_speed_hz = cdiv ? (bs->clk_hz / cdiv) : (bs->clk_hz / 65536);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) /* handle all the 3-wire mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) if (spi->mode & SPI_3WIRE && tfr->rx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) cs |= BCM2835_SPI_CS_REN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) /* set transmit buffers and length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) bs->tx_buf = tfr->tx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) bs->rx_buf = tfr->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) bs->tx_len = tfr->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) bs->rx_len = tfr->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) /* Calculate the estimated time in us the transfer runs. Note that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * there is 1 idle clocks cycles after each byte getting transferred
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) * so we have 9 cycles/byte. This is used to find the number of Hz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * per byte per polling limit. E.g., we can transfer 1 byte in 30 us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * per 300,000 Hz of bus clock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) hz_per_byte = polling_limit_us ? (9 * 1000000) / polling_limit_us : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) byte_limit = hz_per_byte ? tfr->effective_speed_hz / hz_per_byte : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) /* run in polling mode for short transfers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (tfr->len < byte_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) return bcm2835_spi_transfer_one_poll(ctlr, spi, tfr, cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) /* run in dma mode if conditions are right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * Note that unlike poll or interrupt mode DMA mode does not have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * this 1 idle clock cycle pattern but runs the spi clock without gaps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (ctlr->can_dma && bcm2835_spi_can_dma(ctlr, spi, tfr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) return bcm2835_spi_transfer_one_dma(ctlr, spi, tfr, cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) /* run in interrupt-mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) return bcm2835_spi_transfer_one_irq(ctlr, spi, tfr, cs, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) struct spi_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) struct spi_device *spi = msg->spi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (ctlr->can_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * DMA transfers are limited to 16 bit (0 to 65535 bytes) by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * the SPI HW due to DLEN. Split up transfers (32-bit FIFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) * aligned) if the limit is exceeded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) ret = spi_split_transfers_maxsize(ctlr, msg, 65532,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * Set up clock polarity before spi_transfer_one_message() asserts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * chip select to avoid a gratuitous clock signal edge.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) bcm2835_wr(bs, BCM2835_SPI_CS, bs->prepare_cs[spi->chip_select]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) struct spi_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) /* if an error occurred and we have an active dma, then terminate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) dmaengine_terminate_sync(ctlr->dma_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) bs->tx_dma_active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) dmaengine_terminate_sync(ctlr->dma_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) bs->rx_dma_active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) bcm2835_spi_undo_prologue(bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /* and reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) bcm2835_spi_reset_hw(bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) static int chip_match_name(struct gpio_chip *chip, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) return !strcmp(chip->label, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) static int bcm2835_spi_setup(struct spi_device *spi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) struct spi_controller *ctlr = spi->controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) struct gpio_chip *chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) u32 cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (spi->chip_select >= BCM2835_SPI_NUM_CS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) dev_err(&spi->dev, "only %d chip-selects supported\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) BCM2835_SPI_NUM_CS - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) * Precalculate SPI slave's CS register value for ->prepare_message():
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) * The driver always uses software-controlled GPIO chip select, hence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) * set the hardware-controlled native chip select to an invalid value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * to prevent it from interfering.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) cs = BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (spi->mode & SPI_CPOL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) cs |= BCM2835_SPI_CS_CPOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (spi->mode & SPI_CPHA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) cs |= BCM2835_SPI_CS_CPHA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) bs->prepare_cs[spi->chip_select] = cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) * Precalculate SPI slave's CS register value to clear RX FIFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * in case of a TX-only DMA transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (ctlr->dma_rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) bs->clear_rx_cs[spi->chip_select] = cs |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) BCM2835_SPI_CS_TA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) BCM2835_SPI_CS_DMAEN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) BCM2835_SPI_CS_CLEAR_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) dma_sync_single_for_device(ctlr->dma_rx->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) bs->clear_rx_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) sizeof(bs->clear_rx_cs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) * sanity checking the native-chipselects
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if (spi->mode & SPI_NO_CS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) * The SPI core has successfully requested the CS GPIO line from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * device tree, so we are done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if (spi->cs_gpiod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (spi->chip_select > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) /* error in the case of native CS requested with CS > 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) * officially there is a CS2, but it is not documented
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * which GPIO is connected with that...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) dev_err(&spi->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) "setup: only two native chip-selects are supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) * Translate native CS to GPIO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) * FIXME: poking around in the gpiolib internals like this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) * not very good practice. Find a way to locate the real problem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) * and fix it. Why is the GPIO descriptor in spi->cs_gpiod
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) * sometimes not assigned correctly? Erroneous device trees?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) /* get the gpio chip for the base */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) chip = gpiochip_find("pinctrl-bcm2835", chip_match_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (!chip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) spi->cs_gpiod = gpiochip_request_own_desc(chip, 8 - spi->chip_select,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) DRV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) GPIO_LOOKUP_FLAGS_DEFAULT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) GPIOD_OUT_LOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) if (IS_ERR(spi->cs_gpiod))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) return PTR_ERR(spi->cs_gpiod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) /* and set up the "mode" and level */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) dev_info(&spi->dev, "setting up native-CS%i to use GPIO\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) spi->chip_select);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) static int bcm2835_spi_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) struct spi_controller *ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) struct bcm2835_spi *bs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) ctlr = devm_spi_alloc_master(&pdev->dev, ALIGN(sizeof(*bs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) dma_get_cache_alignment()));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) if (!ctlr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) platform_set_drvdata(pdev, ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) ctlr->use_gpio_descriptors = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) ctlr->mode_bits = BCM2835_SPI_MODE_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) ctlr->num_chipselect = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) ctlr->setup = bcm2835_spi_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) ctlr->transfer_one = bcm2835_spi_transfer_one;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) ctlr->handle_err = bcm2835_spi_handle_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) ctlr->prepare_message = bcm2835_spi_prepare_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) ctlr->dev.of_node = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) bs = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) bs->ctlr = ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) bs->regs = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) if (IS_ERR(bs->regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) return PTR_ERR(bs->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) bs->clk = devm_clk_get(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) if (IS_ERR(bs->clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) return dev_err_probe(&pdev->dev, PTR_ERR(bs->clk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) "could not get clk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) bs->irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) if (bs->irq <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) return bs->irq ? bs->irq : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) clk_prepare_enable(bs->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) bs->clk_hz = clk_get_rate(bs->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) err = bcm2835_dma_init(ctlr, &pdev->dev, bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) goto out_clk_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) /* initialise the hardware with the default polarities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) bcm2835_wr(bs, BCM2835_SPI_CS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) dev_name(&pdev->dev), bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) goto out_dma_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) err = spi_register_controller(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) dev_err(&pdev->dev, "could not register SPI controller: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) goto out_dma_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) bcm2835_debugfs_create(bs, dev_name(&pdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) out_dma_release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) bcm2835_dma_release(ctlr, bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) out_clk_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) clk_disable_unprepare(bs->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) static int bcm2835_spi_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) struct spi_controller *ctlr = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) bcm2835_debugfs_remove(bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) spi_unregister_controller(ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) bcm2835_dma_release(ctlr, bs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) /* Clear FIFOs, and disable the HW block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) bcm2835_wr(bs, BCM2835_SPI_CS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) clk_disable_unprepare(bs->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) static void bcm2835_spi_shutdown(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) ret = bcm2835_spi_remove(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) dev_err(&pdev->dev, "failed to shutdown\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) static const struct of_device_id bcm2835_spi_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) { .compatible = "brcm,bcm2835-spi", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) MODULE_DEVICE_TABLE(of, bcm2835_spi_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) static struct platform_driver bcm2835_spi_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) .name = DRV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) .of_match_table = bcm2835_spi_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) .probe = bcm2835_spi_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) .remove = bcm2835_spi_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) .shutdown = bcm2835_spi_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) module_platform_driver(bcm2835_spi_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) MODULE_DESCRIPTION("SPI controller driver for Broadcom BCM2835");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) MODULE_AUTHOR("Chris Boot <bootc@bootc.net>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) MODULE_LICENSE("GPL");