// SPDX-License-Identifier: GPL-2.0-only
/*
 * Core driver for the High Speed UART DMA
 *
 * Copyright (C) 2015 Intel Corporation
 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *
 * Partially based on the bits found in drivers/tty/serial/mfd.c.
 */

/*
 * DMA channel allocation:
 * 1. Even-numbered channels are used for DMA Read (UART TX), odd-numbered
 *    channels for DMA Write (UART RX).
 * 2. Channels 0/1 are assigned to port 0, channels 2/3 to port 1, channels
 *    4/5 to port 2, and so on. See the illustrative helpers below the
 *    includes.
 */

#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "hsu.h"
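
/*
 * Illustrative only, not part of the driver: the channel allocation scheme
 * described at the top of this file boils down to the two hypothetical
 * helpers below. They exist purely to make the mapping explicit.
 */
static inline unsigned int hsu_chan_to_port(unsigned int chan)
{
	/* Channels 2n and 2n + 1 both belong to port n */
	return chan / 2;
}

static inline unsigned int hsu_chan_is_rx(unsigned int chan)
{
	/* Odd channels do DMA Write (UART RX), even ones DMA Read (UART TX) */
	return chan & 0x1;
}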

#define HSU_DMA_BUSWIDTHS			\
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |	\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |	\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |	\
	BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) |	\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |	\
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) |	\
	BIT(DMA_SLAVE_BUSWIDTH_16_BYTES)

static inline void hsu_chan_disable(struct hsu_dma_chan *hsuc)
{
	hsu_chan_writel(hsuc, HSU_CH_CR, 0);
}

static inline void hsu_chan_enable(struct hsu_dma_chan *hsuc)
{
	u32 cr = HSU_CH_CR_CHA;

	/* The CHD bit selects the direction: set for DMA Write (UART RX) */
	if (hsuc->direction == DMA_MEM_TO_DEV)
		cr &= ~HSU_CH_CR_CHD;
	else if (hsuc->direction == DMA_DEV_TO_MEM)
		cr |= HSU_CH_CR_CHD;

	hsu_chan_writel(hsuc, HSU_CH_CR, cr);
}

static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
{
	struct dma_slave_config *config = &hsuc->config;
	struct hsu_dma_desc *desc = hsuc->desc;
	u32 bsr = 0, mtsr = 0;	/* to shut the compiler up */
	u32 dcr = HSU_CH_DCR_CHSOE | HSU_CH_DCR_CHEI;
	unsigned int i, count;

	if (hsuc->direction == DMA_MEM_TO_DEV) {
		bsr = config->dst_maxburst;
		mtsr = config->dst_addr_width;
	} else if (hsuc->direction == DMA_DEV_TO_MEM) {
		bsr = config->src_maxburst;
		mtsr = config->src_addr_width;
	}

	hsu_chan_disable(hsuc);

	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
	hsu_chan_writel(hsuc, HSU_CH_BSR, bsr);
	hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr);

	/* Set descriptors */
	count = desc->nents - desc->active;
	for (i = 0; i < count && i < HSU_DMA_CHAN_NR_DESC; i++) {
		hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr);
		hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len);

		/* Prepare value for DCR */
		dcr |= HSU_CH_DCR_DESCA(i);
		dcr |= HSU_CH_DCR_CHTOI(i);	/* timeout bit, see HSU Errata 1 */

		desc->active++;
	}
	/* Only for the last descriptor in the chain */
	dcr |= HSU_CH_DCR_CHSOD(count - 1);
	dcr |= HSU_CH_DCR_CHDI(count - 1);

	hsu_chan_writel(hsuc, HSU_CH_DCR, dcr);

	hsu_chan_enable(hsuc);
}

static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
{
	hsu_chan_disable(hsuc);
	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
}

static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc)
{
	hsu_dma_chan_start(hsuc);
}

static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
{
	struct virt_dma_desc *vdesc;

	/* Get the next descriptor */
	vdesc = vchan_next_desc(&hsuc->vchan);
	if (!vdesc) {
		hsuc->desc = NULL;
		return;
	}

	list_del(&vdesc->node);
	hsuc->desc = to_hsu_dma_desc(vdesc);

	/* Start the channel with a new descriptor */
	hsu_dma_start_channel(hsuc);
}

/*
 * hsu_dma_get_status() - get DMA channel status
 * @chip: HSUART DMA chip
 * @nr: DMA channel number
 * @status: pointer for DMA Channel Status Register value
 *
 * Description:
 * The function reads and clears the DMA Channel Status Register, checks
 * whether it was a timeout interrupt, and returns a corresponding value.
 *
 * The caller should provide a valid pointer for the DMA Channel Status
 * Register value that will be returned in @status.
 *
 * Return:
 * 1 for DMA timeout status, 0 for other DMA status, or a negative error
 * code for invalid parameters or no interrupt pending.
 */
int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr,
		       u32 *status)
{
	struct hsu_dma_chan *hsuc;
	unsigned long flags;
	u32 sr;

	/* Sanity check */
	if (nr >= chip->hsu->nr_channels)
		return -EINVAL;

	hsuc = &chip->hsu->chan[nr];

	/*
	 * No matter the situation, the IRQ status must be read and thereby
	 * cleared. There is a hardware bug here, see Errata 5, HSD 2900918.
	 */
	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	/* Check if any interrupt is pending */
	sr &= ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
	if (!sr)
		return -EIO;

	/* On a timeout IRQ we need to wait a bit, see Errata 2 */
	if (sr & HSU_CH_SR_DESCTO_ANY)
		udelay(2);

	/*
	 * At this point, at least one of Descriptor Time Out, Channel Error
	 * or Descriptor Done bits must be set. Clear the Descriptor Time Out
	 * bits and if sr is still non-zero, it must be channel error or
	 * descriptor done which are higher priority than timeout and handled
	 * in hsu_dma_do_irq(). Else, it must be a timeout.
	 */
	sr &= ~HSU_CH_SR_DESCTO_ANY;

	*status = sr;

	return sr ? 0 : 1;
}
EXPORT_SYMBOL_GPL(hsu_dma_get_status);

/*
 * hsu_dma_do_irq() - DMA interrupt handler
 * @chip: HSUART DMA chip
 * @nr: DMA channel number
 * @status: Channel Status Register value
 *
 * Description:
 * This function handles Channel Error and Descriptor Done interrupts.
 * It should be called after determining that the DMA interrupt is not a
 * normal timeout interrupt, i.e. hsu_dma_get_status() returned 0.
 *
 * Return:
 * 0 for an invalid channel number, 1 otherwise.
 */
int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status)
{
	struct hsu_dma_chan *hsuc;
	struct hsu_dma_desc *desc;
	unsigned long flags;

	/* Sanity check */
	if (nr >= chip->hsu->nr_channels)
		return 0;

	hsuc = &chip->hsu->chan[nr];

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	desc = hsuc->desc;
	if (desc) {
		if (status & HSU_CH_SR_CHE) {
			desc->status = DMA_ERROR;
		} else if (desc->active < desc->nents) {
			hsu_dma_start_channel(hsuc);
		} else {
			vchan_cookie_complete(&desc->vdesc);
			desc->status = DMA_COMPLETE;
			hsu_dma_start_transfer(hsuc);
		}
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 1;
}
EXPORT_SYMBOL_GPL(hsu_dma_do_irq);
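
/*
 * Example (illustrative only, not part of this file): a minimal interrupt
 * handler built on hsu_dma_get_status() and hsu_dma_do_irq(), loosely
 * modelled on the PCI glue driver. It assumes <linux/interrupt.h> for the
 * irqreturn_t machinery; the function name is hypothetical. A timeout
 * status (return value 1) is only acknowledged here, since it is meant to
 * be handled by the serial driver itself.
 */
static irqreturn_t hsu_dma_example_irq(int irq, void *dev)
{
	struct hsu_dma_chip *chip = dev;
	irqreturn_t ret = IRQ_NONE;
	unsigned short i;
	u32 status;
	int err;

	for (i = 0; i < chip->hsu->nr_channels; i++) {
		err = hsu_dma_get_status(chip, i, &status);
		if (err > 0)		/* timeout, consumed elsewhere */
			ret |= IRQ_HANDLED;
		else if (err == 0)	/* channel error or descriptor done */
			ret |= hsu_dma_do_irq(chip, i, status);
	}

	return ret;
}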

static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents)
{
	struct hsu_dma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_NOWAIT);
	if (!desc->sg) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static void hsu_dma_desc_free(struct virt_dma_desc *vdesc)
{
	struct hsu_dma_desc *desc = to_hsu_dma_desc(vdesc);

	kfree(desc->sg);
	kfree(desc);
}

static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	struct hsu_dma_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	desc = hsu_dma_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		desc->sg[i].addr = sg_dma_address(sg);
		desc->sg[i].len = sg_dma_len(sg);

		desc->length += sg_dma_len(sg);
	}

	desc->nents = sg_len;
	desc->direction = direction;
	/* desc->active = 0 by kzalloc */
	desc->status = DMA_IN_PROGRESS;

	return vchan_tx_prep(&hsuc->vchan, &desc->vdesc, flags);
}

static void hsu_dma_issue_pending(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc)
		hsu_dma_start_transfer(hsuc);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
}
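
/*
 * Example (illustrative only, not part of this file): the typical dmaengine
 * consumer sequence that ends up in the callbacks above. A serial driver
 * would do roughly this; the function name is hypothetical, error handling
 * is trimmed, and <linux/dmaengine.h> provides the dmaengine_*() helpers.
 */
static int hsu_dma_example_submit(struct dma_chan *chan,
				  struct scatterlist *sgl, unsigned int sg_len,
				  struct dma_slave_config *config)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* Ends up in hsu_dma_slave_config() below */
	if (dmaengine_slave_config(chan, config))
		return -EINVAL;

	/* Ends up in hsu_dma_prep_slave_sg() above */
	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;

	cookie = dmaengine_submit(desc);

	/* Ends up in hsu_dma_issue_pending(), which kicks an idle channel */
	dma_async_issue_pending(chan);

	return dma_submit_error(cookie) ? -EIO : 0;
}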

static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
{
	struct hsu_dma_desc *desc = hsuc->desc;
	size_t bytes = 0;
	int i;

	/* Sum up the entries that have not been programmed into hardware */
	for (i = desc->active; i < desc->nents; i++)
		bytes += desc->sg[i].len;

	/* Add what is left of the entries currently in flight */
	i = HSU_DMA_CHAN_NR_DESC - 1;
	do {
		bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
	} while (--i >= 0);

	return bytes;
}

static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	size_t bytes;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, state);
	if (status == DMA_COMPLETE)
		return status;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	vdesc = vchan_find_desc(&hsuc->vchan, cookie);
	if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie) {
		bytes = hsu_dma_active_desc_size(hsuc);
		dma_set_residue(state, bytes);
		status = hsuc->desc->status;
	} else if (vdesc) {
		bytes = to_hsu_dma_desc(vdesc)->length;
		dma_set_residue(state, bytes);
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return status;
}

static int hsu_dma_slave_config(struct dma_chan *chan,
				struct dma_slave_config *config)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);

	memcpy(&hsuc->config, config, sizeof(hsuc->config));

	return 0;
}

static int hsu_dma_pause(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
		hsu_chan_disable(hsuc);
		hsuc->desc->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 0;
}

static int hsu_dma_resume(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) {
		hsuc->desc->status = DMA_IN_PROGRESS;
		hsu_chan_enable(hsuc);
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 0;
}

static int hsu_dma_terminate_all(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&hsuc->vchan.lock, flags);

	hsu_dma_stop_channel(hsuc);
	if (hsuc->desc) {
		hsu_dma_desc_free(&hsuc->desc->vdesc);
		hsuc->desc = NULL;
	}

	vchan_get_all_descriptors(&hsuc->vchan, &head);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
	vchan_dma_desc_free_list(&hsuc->vchan, &head);

	return 0;
}

static void hsu_dma_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}

static void hsu_dma_synchronize(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);

	vchan_synchronize(&hsuc->vchan);
}

int hsu_dma_probe(struct hsu_dma_chip *chip)
{
	struct hsu_dma *hsu;
	void __iomem *addr = chip->regs + chip->offset;
	unsigned short i;
	int ret;

	hsu = devm_kzalloc(chip->dev, sizeof(*hsu), GFP_KERNEL);
	if (!hsu)
		return -ENOMEM;

	chip->hsu = hsu;

	/* Calculate nr_channels from the IO space length */
	hsu->nr_channels = (chip->length - chip->offset) / HSU_DMA_CHAN_LENGTH;

	hsu->chan = devm_kcalloc(chip->dev, hsu->nr_channels,
				 sizeof(*hsu->chan), GFP_KERNEL);
	if (!hsu->chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&hsu->dma.channels);
	for (i = 0; i < hsu->nr_channels; i++) {
		struct hsu_dma_chan *hsuc = &hsu->chan[i];

		hsuc->vchan.desc_free = hsu_dma_desc_free;
		vchan_init(&hsuc->vchan, &hsu->dma);

		hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
		hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH;
	}

	dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, hsu->dma.cap_mask);

	hsu->dma.device_free_chan_resources = hsu_dma_free_chan_resources;

	hsu->dma.device_prep_slave_sg = hsu_dma_prep_slave_sg;

	hsu->dma.device_issue_pending = hsu_dma_issue_pending;
	hsu->dma.device_tx_status = hsu_dma_tx_status;

	hsu->dma.device_config = hsu_dma_slave_config;
	hsu->dma.device_pause = hsu_dma_pause;
	hsu->dma.device_resume = hsu_dma_resume;
	hsu->dma.device_terminate_all = hsu_dma_terminate_all;
	hsu->dma.device_synchronize = hsu_dma_synchronize;

	hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS;
	hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS;
	hsu->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	hsu->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	hsu->dma.dev = chip->dev;

	dma_set_max_seg_size(hsu->dma.dev, HSU_CH_DxTSR_MASK);

	ret = dma_async_device_register(&hsu->dma);
	if (ret)
		return ret;

	dev_info(chip->dev, "Found HSU DMA, %d channels\n", hsu->nr_channels);
	return 0;
}
EXPORT_SYMBOL_GPL(hsu_dma_probe);
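
/*
 * Example (illustrative only, not part of this file): a parent glue driver
 * is expected to fill in the chip structure and then call hsu_dma_probe(),
 * roughly as sketched below. The function name is hypothetical; the field
 * names follow struct hsu_dma_chip from hsu.h, and error handling is kept
 * minimal.
 */
static int hsu_dma_example_setup(struct device *dev, void __iomem *regs,
				 unsigned int length, unsigned int offset)
{
	struct hsu_dma_chip *chip;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = dev;
	chip->regs = regs;		/* MMIO base of the whole device */
	chip->length = length;		/* total length of the MMIO space */
	chip->offset = offset;		/* offset of the DMA registers */

	return hsu_dma_probe(chip);
}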

int hsu_dma_remove(struct hsu_dma_chip *chip)
{
	struct hsu_dma *hsu = chip->hsu;
	unsigned short i;

	dma_async_device_unregister(&hsu->dma);

	for (i = 0; i < hsu->nr_channels; i++) {
		struct hsu_dma_chan *hsuc = &hsu->chan[i];

		tasklet_kill(&hsuc->vchan.task);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hsu_dma_remove);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("High Speed UART DMA core driver");
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");