// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA driver for Nvidia's Tegra20 APB DMA controller.
 *
 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include "dmaengine.h"

#define CREATE_TRACE_POINTS
#include <trace/events/tegra_apb_dma.h>

#define TEGRA_APBDMA_GENERAL			0x0
#define TEGRA_APBDMA_GENERAL_ENABLE		BIT(31)

#define TEGRA_APBDMA_CONTROL			0x010
#define TEGRA_APBDMA_IRQ_MASK			0x01c
#define TEGRA_APBDMA_IRQ_MASK_SET		0x020

/* CSR register */
#define TEGRA_APBDMA_CHAN_CSR			0x00
#define TEGRA_APBDMA_CSR_ENB			BIT(31)
#define TEGRA_APBDMA_CSR_IE_EOC			BIT(30)
#define TEGRA_APBDMA_CSR_HOLD			BIT(29)
#define TEGRA_APBDMA_CSR_DIR			BIT(28)
#define TEGRA_APBDMA_CSR_ONCE			BIT(27)
#define TEGRA_APBDMA_CSR_FLOW			BIT(21)
#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT		16
#define TEGRA_APBDMA_CSR_REQ_SEL_MASK		0x1F
#define TEGRA_APBDMA_CSR_WCOUNT_MASK		0xFFFC

/* STATUS register */
#define TEGRA_APBDMA_CHAN_STATUS		0x004
#define TEGRA_APBDMA_STATUS_BUSY		BIT(31)
#define TEGRA_APBDMA_STATUS_ISE_EOC		BIT(30)
#define TEGRA_APBDMA_STATUS_HALT		BIT(29)
#define TEGRA_APBDMA_STATUS_PING_PONG		BIT(28)
#define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
#define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC

#define TEGRA_APBDMA_CHAN_CSRE			0x00C
#define TEGRA_APBDMA_CHAN_CSRE_PAUSE		BIT(31)

/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR		0x010

/* AHB sequence register */
#define TEGRA_APBDMA_CHAN_AHBSEQ		0x14
#define TEGRA_APBDMA_AHBSEQ_INTR_ENB		BIT(31)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_AHBSEQ_BURST_1		(4 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_4		(5 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_8		(6 << 24)
#define TEGRA_APBDMA_AHBSEQ_DBL_BUF		BIT(19)
#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT		16
#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE		0

/* APB address */
#define TEGRA_APBDMA_CHAN_APBPTR		0x018

/* APB sequence register */
#define TEGRA_APBDMA_CHAN_APBSEQ		0x01c
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_APBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1		(1 << 16)

/* Tegra148 specific registers */
#define TEGRA_APBDMA_CHAN_WCOUNT		0x20

#define TEGRA_APBDMA_CHAN_WORD_TRANSFER		0x24

/*
 * If any burst is in flight and the DMA is paused, this is the time (in
 * microseconds) needed for the in-flight burst to complete and for the
 * DMA status register to be updated.
 */
#define TEGRA_APBDMA_BURST_COMPLETE_TIME	20

/* Channel base address offset from APBDMA base address */
#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000

#define TEGRA_APBDMA_SLAVE_ID_INVALID	(TEGRA_APBDMA_CSR_REQ_SEL_MASK + 1)
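
/*
 * For illustration only: a one-shot, flow-controlled transfer using the
 * (hypothetical) requester ID 5 would compose its CSR value from the bits
 * above roughly as
 *
 *	csr = TEGRA_APBDMA_CSR_IE_EOC | TEGRA_APBDMA_CSR_ONCE |
 *	      TEGRA_APBDMA_CSR_FLOW |
 *	      (5 << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT);
 *
 * plus TEGRA_APBDMA_CSR_DIR depending on the transfer direction.
 * TEGRA_APBDMA_CSR_ENB is OR'd in when the channel is actually started
 * (see tegra_dma_start() below).
 */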

struct tegra_dma;

/*
 * tegra_dma_chip_data: Tegra chip-specific DMA data.
 * @nr_channels: Number of channels available in the controller.
 * @channel_reg_size: Channel register size/stride.
 * @max_dma_count: Maximum DMA transfer count supported by the DMA controller.
 * @support_channel_pause: Support channel-wise pause of DMA.
 * @support_separate_wcount_reg: Support separate word count register.
 */
struct tegra_dma_chip_data {
	unsigned int nr_channels;
	unsigned int channel_reg_size;
	unsigned int max_dma_count;
	bool support_channel_pause;
	bool support_separate_wcount_reg;
};
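
/*
 * Purely illustrative example of a chip data instance; the values below
 * are placeholders, not taken from any datasheet or SoC header:
 *
 *	static const struct tegra_dma_chip_data example_chip_data = {
 *		.nr_channels			= 16,
 *		.channel_reg_size		= 0x20,
 *		.max_dma_count			= 1024UL * 64,
 *		.support_channel_pause		= false,
 *		.support_separate_wcount_reg	= false,
 *	};
 */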

/* DMA channel registers */
struct tegra_dma_channel_regs {
	u32 csr;
	u32 ahb_ptr;
	u32 apb_ptr;
	u32 ahb_seq;
	u32 apb_seq;
	u32 wcount;
};

/*
 * tegra_dma_sg_req: DMA request details used to configure the hardware.
 * This holds the details of one sub-transfer used to program the DMA
 * engine. A client's request for a data transfer can be broken into
 * multiple sub-transfers, depending on the requester details and on what
 * the hardware supports.
 * Each sub-transfer is added to the list of transfers and points to the
 * Tegra DMA descriptor that manages the overall transfer details.
 */
struct tegra_dma_sg_req {
	struct tegra_dma_channel_regs ch_regs;
	unsigned int req_len;
	bool configured;
	bool last_sg;
	struct list_head node;
	struct tegra_dma_desc *dma_desc;
	unsigned int words_xferred;
};

/*
 * tegra_dma_desc: Tegra DMA descriptor which manages a client request.
 * This descriptor keeps track of the transfer status, callbacks, request
 * counts, etc.
 */
struct tegra_dma_desc {
	struct dma_async_tx_descriptor txd;
	unsigned int bytes_requested;
	unsigned int bytes_transferred;
	enum dma_status dma_status;
	struct list_head node;
	struct list_head tx_list;
	struct list_head cb_node;
	unsigned int cb_count;
};
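
/*
 * Descriptive note: one tegra_dma_desc represents a single client request;
 * its tx_list holds the tegra_dma_sg_req entries for each hardware
 * sub-transfer, and the channel moves those sg_reqs between its pending
 * and free lists as they are programmed and completed.
 */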

struct tegra_dma_channel;

typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
				bool to_terminate);

/* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel {
	struct dma_chan dma_chan;
	char name[12];
	bool config_init;
	unsigned int id;
	void __iomem *chan_addr;
	spinlock_t lock;
	bool busy;
	struct tegra_dma *tdma;
	bool cyclic;

	/* Different lists for managing the requests */
	struct list_head free_sg_req;
	struct list_head pending_sg_req;
	struct list_head free_dma_desc;
	struct list_head cb_desc;

	/* ISR handler and tasklet for bottom half of isr handling */
	dma_isr_handler isr_handler;
	struct tasklet_struct tasklet;

	/* Channel-slave specific configuration */
	unsigned int slave_id;
	struct dma_slave_config dma_sconfig;
	struct tegra_dma_channel_regs channel_reg;

	struct wait_queue_head wq;
};

/* tegra_dma: Tegra DMA specific information */
struct tegra_dma {
	struct dma_device dma_dev;
	struct device *dev;
	struct clk *dma_clk;
	struct reset_control *rst;
	spinlock_t global_lock;
	void __iomem *base_addr;
	const struct tegra_dma_chip_data *chip_data;

	/*
	 * Counter for managing global pausing of the DMA controller.
	 * Only applicable for devices that don't support individual
	 * channel pausing.
	 */
	u32 global_pause_count;

	/* Last member of the structure */
	struct tegra_dma_channel channels[];
};

static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + reg);
}

static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
{
	return readl(tdma->base_addr + reg);
}

static inline void tdc_write(struct tegra_dma_channel *tdc,
			     u32 reg, u32 val)
{
	writel(val, tdc->chan_addr + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl(tdc->chan_addr + reg);
}

static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, dma_chan);
}

static inline struct tegra_dma_desc *
txd_to_tegra_dma_desc(struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_dma_desc, txd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return &tdc->dma_chan.dev->device;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);

/* Get a DMA desc from the free list; if none is available, allocate one. */
static struct tegra_dma_desc *tegra_dma_desc_get(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Only reuse descriptors that are acked and have no pending callbacks */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
			list_del(&dma_desc->node);
			spin_unlock_irqrestore(&tdc->lock, flags);
			dma_desc->txd.flags = 0;
			return dma_desc;
		}
	}

	spin_unlock_irqrestore(&tdc->lock, flags);

	/* Allocate a new DMA desc */
	dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
	dma_desc->txd.flags = 0;

	return dma_desc;
}
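
/*
 * Note: GFP_NOWAIT is used above (and in tegra_dma_sg_req_get() below)
 * because descriptor allocation may be reached from atomic context, as
 * the dmaengine prep callbacks may be invoked with locks held.
 */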

static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
			       struct tegra_dma_desc *dma_desc)
{
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&dma_desc->tx_list))
		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static struct tegra_dma_sg_req *
tegra_dma_sg_req_get(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&tdc->free_sg_req)) {
		sg_req = list_first_entry(&tdc->free_sg_req, typeof(*sg_req),
					  node);
		list_del(&sg_req->node);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return sg_req;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	sg_req = kzalloc(sizeof(*sg_req), GFP_NOWAIT);

	return sg_req;
}

static int tegra_dma_slave_config(struct dma_chan *dc,
				  struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	if (!list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Configuration not allowed\n");
		return -EBUSY;
	}

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID &&
	    sconfig->device_fc) {
		if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK)
			return -EINVAL;
		tdc->slave_id = sconfig->slave_id;
	}
	tdc->config_init = true;

	return 0;
}
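
/*
 * Client-side usage sketch (illustrative only, not part of this driver;
 * the FIFO address and channel are hypothetical):
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr	= fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 4,
 *		.direction	= DMA_MEM_TO_DEV,
 *		.device_fc	= false,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 *
 * Note that configuration is rejected with -EBUSY while requests are
 * still pending on the channel.
 */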

static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
				   bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (tdc->tdma->global_pause_count == 0) {
		tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	}

	tdc->tdma->global_pause_count++;

	spin_unlock(&tdma->global_lock);
}

static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (WARN_ON(tdc->tdma->global_pause_count == 0))
		goto out;

	if (--tdc->tdma->global_pause_count == 0)
		tdma_write(tdma, TEGRA_APBDMA_GENERAL,
			   TEGRA_APBDMA_GENERAL_ENABLE);

out:
	spin_unlock(&tdma->global_lock);
}

static void tegra_dma_pause(struct tegra_dma_channel *tdc,
			    bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
			  TEGRA_APBDMA_CHAN_CSRE_PAUSE);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	} else {
		tegra_dma_global_pause(tdc, wait_for_burst_complete);
	}
}

static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
	else
		tegra_dma_global_resume(tdc);
}
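
/*
 * Summary: hardware with per-channel pause support uses the channel's
 * CSRE register; older hardware can only gate the whole controller, so
 * global pause/resume is reference counted via tdma->global_pause_count.
 */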

static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
	u32 csr, status;

	/* Disable interrupts */
	csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
	csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Disable DMA */
	csr &= ~TEGRA_APBDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
	}
	tdc->busy = false;
}

static void tegra_dma_start(struct tegra_dma_channel *tdc,
			    struct tegra_dma_sg_req *sg_req)
{
	struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;

	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);

	/* Start DMA */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
		  ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
}

static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
					 struct tegra_dma_sg_req *nsg_req)
{
	unsigned long status;

	/*
	 * The DMA controller reloads the new configuration for the next
	 * transfer after the last burst of the current transfer completes.
	 * If there is no EOC status, this ensures that the last burst has
	 * not completed yet. The last burst may still be in flight: it can
	 * complete, but because the DMA is paused it will neither raise an
	 * interrupt nor reload the new configuration.
	 * If the EOC status is already set, the interrupt handler needs to
	 * load the new configuration instead.
	 */
	tegra_dma_pause(tdc, false);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	/*
	 * If an interrupt is pending, do nothing, as the ISR will handle
	 * the programming of the new request.
	 */
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_err(tdc2dev(tdc),
			"Skipping new configuration as interrupt is pending\n");
		tegra_dma_resume(tdc);
		return;
	}

	/* Safe to program new configuration */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
			  nsg_req->ch_regs.wcount);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
		  nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
	nsg_req->configured = true;
	nsg_req->words_xferred = 0;

	tegra_dma_resume(tdc);
}

static void tdc_start_head_req(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;

	sg_req = list_first_entry(&tdc->pending_sg_req, typeof(*sg_req), node);
	tegra_dma_start(tdc, sg_req);
	sg_req->configured = true;
	sg_req->words_xferred = 0;
	tdc->busy = true;
}

static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *hsgreq, *hnsgreq;

	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
		hnsgreq = list_first_entry(&hsgreq->node, typeof(*hnsgreq),
					   node);
		tegra_dma_configure_for_next(tdc, hnsgreq);
	}
}

static inline unsigned int
get_current_xferred_count(struct tegra_dma_channel *tdc,
			  struct tegra_dma_sg_req *sg_req,
			  unsigned long status)
{
	return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}
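
/*
 * Worked example (assuming the status count field encodes the remaining
 * transfer as (remaining words - 1) * 4): for a 64-byte request with
 * 8 words still outstanding the field reads 28, so the bytes transferred
 * so far are 64 - 28 - 4 = 32.
 */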

static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sgreq;

	while (!list_empty(&tdc->pending_sg_req)) {
		sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
					 node);
		list_move_tail(&sgreq->node, &tdc->free_sg_req);
		if (sgreq->last_sg) {
			dma_desc = sgreq->dma_desc;
			dma_desc->dma_status = DMA_ERROR;
			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);

			/* Add to the cb list if it is not already there */
			if (!dma_desc->cb_count)
				list_add_tail(&dma_desc->cb_node,
					      &tdc->cb_desc);
			dma_desc->cb_count++;
		}
	}
	tdc->isr_handler = NULL;
}

static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
					   bool to_terminate)
{
	struct tegra_dma_sg_req *hsgreq;

	/*
	 * Check that the head request on the list is in flight.
	 * If it is not in flight, abort the transfer, as the looping
	 * transfer cannot continue.
	 */
	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!hsgreq->configured) {
		tegra_dma_stop(tdc);
		pm_runtime_put(tdc->tdma->dev);
		dev_err(tdc2dev(tdc), "DMA transfer underflow, aborting DMA\n");
		tegra_dma_abort_all(tdc);
		return false;
	}

	/* Configure the next request */
	if (!to_terminate)
		tdc_configure_next_head_desc(tdc);

	return true;
}

static void handle_once_dma_done(struct tegra_dma_channel *tdc,
				 bool to_terminate)
{
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sgreq;

	tdc->busy = false;
	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	list_del(&sgreq->node);
	if (sgreq->last_sg) {
		dma_desc->dma_status = DMA_COMPLETE;
		dma_cookie_complete(&dma_desc->txd);
		if (!dma_desc->cb_count)
			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
		dma_desc->cb_count++;
		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	}
	list_add_tail(&sgreq->node, &tdc->free_sg_req);

	/* Do not start the DMA if it is going to be terminated */
	if (to_terminate)
		return;

	if (list_empty(&tdc->pending_sg_req)) {
		pm_runtime_put(tdc->tdma->dev);
		return;
	}

	tdc_start_head_req(tdc);
}

static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
					    bool to_terminate)
{
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sgreq;
	bool st;

	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	/* If we DMA for long enough, the transfer count will wrap around */
	dma_desc->bytes_transferred =
		(dma_desc->bytes_transferred + sgreq->req_len) %
		dma_desc->bytes_requested;

	/* The callback needs to be called */
	if (!dma_desc->cb_count)
		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
	dma_desc->cb_count++;

	sgreq->words_xferred = 0;

	/* If not the last request, move it to the end of the pending list */
	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
		list_move_tail(&sgreq->node, &tdc->pending_sg_req);
		sgreq->configured = false;
		st = handle_continuous_head_request(tdc, to_terminate);
		if (!st)
			dma_desc->dma_status = DMA_ERROR;
	}
}

static void tegra_dma_tasklet(struct tasklet_struct *t)
{
	struct tegra_dma_channel *tdc = from_tasklet(tdc, t, tasklet);
	struct dmaengine_desc_callback cb;
	struct tegra_dma_desc *dma_desc;
	unsigned int cb_count;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
					    cb_node);
		list_del(&dma_desc->cb_node);
		dmaengine_desc_get_callback(&dma_desc->txd, &cb);
		cb_count = dma_desc->cb_count;
		dma_desc->cb_count = 0;
		trace_tegra_dma_complete_cb(&tdc->dma_chan, cb_count,
					    cb.callback);
		spin_unlock_irqrestore(&tdc->lock, flags);
		while (cb_count--)
			dmaengine_desc_callback_invoke(&cb, NULL);
		spin_lock_irqsave(&tdc->lock, flags);
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	u32 status;

	spin_lock(&tdc->lock);

	trace_tegra_dma_isr(&tdc->dma_chan, irq);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
		tdc->isr_handler(tdc, false);
		tasklet_schedule(&tdc->tasklet);
		wake_up_all(&tdc->wq);
		spin_unlock(&tdc->lock);
		return IRQ_HANDLED;
	}

	spin_unlock(&tdc->lock);
	dev_info(tdc2dev(tdc), "Interrupt already served status 0x%08x\n",
		 status);

	return IRQ_NONE;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&tdc->lock, flags);
	dma_desc->dma_status = DMA_IN_PROGRESS;
	cookie = dma_cookie_assign(&dma_desc->txd);
	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
	spin_unlock_irqrestore(&tdc->lock, flags);

	return cookie;
}
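
/*
 * Typical client sequence (illustrative): a descriptor returned by a
 * dmaengine prep call is queued via dmaengine_submit(), which ends up
 * in tegra_dma_tx_submit() above, and the transfer is then kicked off
 * with dma_async_issue_pending():
 *
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */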

static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;
	int err;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "No DMA request\n");
		goto end;
	}
	if (!tdc->busy) {
		err = pm_runtime_resume_and_get(tdc->tdma->dev);
		if (err < 0) {
			dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
			goto end;
		}

		tdc_start_head_req(tdc);

		/* Continuous single mode: configure the next request */
		if (tdc->cyclic) {
			/*
			 * Wait for one burst time before configuring the
			 * DMA for the next transfer.
			 */
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
			tdc_configure_next_head_desc(tdc);
		}
	}
end:
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static int tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sgreq;
	unsigned long flags;
	u32 status, wcount;
	bool was_busy;

	spin_lock_irqsave(&tdc->lock, flags);

	if (!tdc->busy)
		goto skip_dma_stop;

	/* Pause DMA before checking the queue status */
	tegra_dma_pause(tdc, true);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
		tdc->isr_handler(tdc, true);
		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	}
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
	else
		wcount = status;

	was_busy = tdc->busy;
	tegra_dma_stop(tdc);

	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
		sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
					 node);
		sgreq->dma_desc->bytes_transferred +=
				get_current_xferred_count(tdc, sgreq, wcount);
	}
	tegra_dma_resume(tdc);

	pm_runtime_put(tdc->tdma->dev);
	wake_up_all(&tdc->wq);

skip_dma_stop:
	tegra_dma_abort_all(tdc);

	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
					    cb_node);
		list_del(&dma_desc->cb_node);
		dma_desc->cb_count = 0;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
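/*
 * Helper for tegra_dma_synchronize(): returns true once the channel's EOC
 * interrupt status bit is no longer asserted, i.e. the interrupt has been
 * handled.
 */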
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) static bool tegra_dma_eoc_interrupt_deasserted(struct tegra_dma_channel *tdc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) spin_lock_irqsave(&tdc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) spin_unlock_irqrestore(&tdc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) return !(status & TEGRA_APBDMA_STATUS_ISE_EOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) static void tegra_dma_synchronize(struct dma_chan *dc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) err = pm_runtime_resume_and_get(tdc->tdma->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * The CPU that handles the interrupt could be busy in an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * uninterruptible state; in that case a sibling CPU has to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * wait here until the interrupt has been handled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) tasklet_kill(&tdc->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) pm_runtime_put(tdc->tdma->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
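/*
 * Return the number of bytes transferred so far for @sg_req. Only the
 * request at the head of the pending list can be in flight, so any other
 * request reports 0. Depending on the chip, the word count is read either
 * from the dedicated WORD_TRANSFER register or from the STATUS register.
 */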
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) struct tegra_dma_sg_req *sg_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) u32 status, wcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (!list_is_first(&sg_req->node, &tdc->pending_sg_req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (tdc->tdma->chip_data->support_separate_wcount_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (!tdc->tdma->chip_data->support_separate_wcount_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) wcount = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (status & TEGRA_APBDMA_STATUS_ISE_EOC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) return sg_req->req_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) wcount = get_current_xferred_count(tdc, sg_req, wcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (!wcount) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * If wcount was never polled for this SG before, simply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * assume that the transfer hasn't started yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * Otherwise we have reached the end of the transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * The alternative would be to poll the status register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * until the EOC bit is set or wcount goes up. That is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * because the EOC bit is set only once the last burst has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * completed, and until then the counter stays 4 bytes short
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * of the actual transfer size. In cyclic mode the counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * wraps around before EOC is set(!), so we can't easily
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * distinguish the start of a transfer from its end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (sg_req->words_xferred)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) wcount = sg_req->req_len - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) } else if (wcount < sg_req->words_xferred) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * This case will never happen for a non-cyclic transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * For a cyclic transfer, although it is possible for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * next transfer to have already started (resetting the word
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * count), this case should still not happen because we should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * have detected that the EOC bit is set and hence the transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * was completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) wcount = sg_req->req_len - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) sg_req->words_xferred = wcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) return wcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
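/*
 * device_tx_status() hook: if the cookie is not already complete, look it
 * up among the completed-but-unacked descriptors and the pending requests,
 * and report the residue relative to the requested byte count.
 *
 * Illustrative (hypothetical) client-side usage via the generic dmaengine
 * helpers:
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status != DMA_COMPLETE)
 *		pr_debug("residue: %u bytes\n", state.residue);
 */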
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) dma_cookie_t cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) struct dma_tx_state *txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) struct tegra_dma_desc *dma_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) struct tegra_dma_sg_req *sg_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) enum dma_status ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) unsigned int residual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) unsigned int bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) ret = dma_cookie_status(dc, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (ret == DMA_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) spin_lock_irqsave(&tdc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) /* Check on wait_ack desc status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (dma_desc->txd.cookie == cookie) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) ret = dma_desc->dma_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) goto found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) /* Check in pending list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) dma_desc = sg_req->dma_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) if (dma_desc->txd.cookie == cookie) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) bytes = tegra_dma_sg_bytes_xferred(tdc, sg_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) ret = dma_desc->dma_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) goto found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) dma_desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if (dma_desc && txstate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) residual = dma_desc->bytes_requested -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) ((dma_desc->bytes_transferred + bytes) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) dma_desc->bytes_requested);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) dma_set_residue(txstate, residual);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) spin_unlock_irqrestore(&tdc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
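/*
 * Map a dmaengine slave bus width onto the APB sequencer bus-width field.
 * Unsupported widths fall back to 32 bits with a warning.
 */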
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) static inline unsigned int get_bus_width(struct tegra_dma_channel *tdc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) enum dma_slave_buswidth slave_bw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) switch (slave_bw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) case DMA_SLAVE_BUSWIDTH_1_BYTE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) case DMA_SLAVE_BUSWIDTH_2_BYTES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) case DMA_SLAVE_BUSWIDTH_4_BYTES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) case DMA_SLAVE_BUSWIDTH_8_BYTES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) dev_warn(tdc2dev(tdc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) "slave bw is not supported, using 32bits\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
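/*
 * Pick the AHB burst size for a transfer. For example, a maxburst of 8
 * words on a 4-byte slave bus is 32 bytes, i.e. 8 AHB words, which selects
 * BURST_8; a zero burst size falls back to a choice based on the alignment
 * of the transfer length.
 */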
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) static inline unsigned int get_burst_size(struct tegra_dma_channel *tdc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) u32 burst_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) enum dma_slave_buswidth slave_bw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) unsigned int burst_byte, burst_ahb_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * burst_size from the client is in units of the bus width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * convert it into units of the AHB memory width, which is 4 bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) burst_byte = burst_size * slave_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) burst_ahb_width = burst_byte / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) /* If burst size is 0 then calculate the burst size based on length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (!burst_ahb_width) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (len & 0xF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) return TEGRA_APBDMA_AHBSEQ_BURST_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) else if ((len >> 4) & 0x1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return TEGRA_APBDMA_AHBSEQ_BURST_4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) return TEGRA_APBDMA_AHBSEQ_BURST_8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (burst_ahb_width < 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) return TEGRA_APBDMA_AHBSEQ_BURST_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) else if (burst_ahb_width < 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return TEGRA_APBDMA_AHBSEQ_BURST_4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) return TEGRA_APBDMA_AHBSEQ_BURST_8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
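/*
 * Derive the per-transfer parameters (APB address, APB bus-width sequence,
 * burst size, slave bus width and the CSR direction bit) from the slave
 * configuration for the given transfer direction.
 */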
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) static int get_transfer_param(struct tegra_dma_channel *tdc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) enum dma_transfer_direction direction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) u32 *apb_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) u32 *apb_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) u32 *csr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) unsigned int *burst_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) enum dma_slave_buswidth *slave_bw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) switch (direction) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) case DMA_MEM_TO_DEV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) *apb_addr = tdc->dma_sconfig.dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) *burst_size = tdc->dma_sconfig.dst_maxburst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) *slave_bw = tdc->dma_sconfig.dst_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) *csr = TEGRA_APBDMA_CSR_DIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) case DMA_DEV_TO_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) *apb_addr = tdc->dma_sconfig.src_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) *burst_size = tdc->dma_sconfig.src_maxburst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) *slave_bw = tdc->dma_sconfig.src_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) *csr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
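/*
 * Program the transfer word count. The hardware field holds (len - 4) with
 * the low two bits masked off; chips with a separate WCOUNT register take
 * it there, older chips pack it into the CSR word.
 */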
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) struct tegra_dma_channel_regs *ch_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) u32 len_field = (len - 4) & 0xFFFC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (tdc->tdma->chip_data->support_separate_wcount_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) ch_regs->wcount = len_field;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) ch_regs->csr |= len_field;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
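/*
 * Prepare a one-shot (ONCE mode) slave scatter-gather transfer: one sg_req
 * is built per scatterlist entry, all sharing the same CSR/AHB/APB setup.
 *
 * A minimal, illustrative client-side sketch (the FIFO address and buffers
 * are hypothetical):
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (desc) {
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */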
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) tegra_dma_prep_slave_sg(struct dma_chan *dc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) struct scatterlist *sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) unsigned int sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) enum dma_transfer_direction direction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) unsigned long flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) struct tegra_dma_sg_req *sg_req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) u32 csr, ahb_seq, apb_ptr, apb_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) enum dma_slave_buswidth slave_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) struct tegra_dma_desc *dma_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) struct list_head req_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) unsigned int burst_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (!tdc->config_init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (sg_len < 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) dev_err(tdc2dev(tdc), "Invalid segment length %u\n", sg_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) &burst_size, &slave_bw) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) INIT_LIST_HEAD(&req_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) csr |= TEGRA_APBDMA_CSR_ONCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) csr |= TEGRA_APBDMA_CSR_FLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (flags & DMA_PREP_INTERRUPT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) csr |= TEGRA_APBDMA_CSR_IE_EOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) dma_desc = tegra_dma_desc_get(tdc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (!dma_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) dev_err(tdc2dev(tdc), "DMA descriptors not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) INIT_LIST_HEAD(&dma_desc->tx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) INIT_LIST_HEAD(&dma_desc->cb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) dma_desc->cb_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) dma_desc->bytes_requested = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) dma_desc->bytes_transferred = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) dma_desc->dma_status = DMA_IN_PROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /* Make transfer requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) for_each_sg(sgl, sg, sg_len, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) u32 len, mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) mem = sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) len = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if ((len & 3) || (mem & 3) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) len > tdc->tdma->chip_data->max_dma_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) dev_err(tdc2dev(tdc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) "DMA length/memory address is not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) tegra_dma_desc_put(tdc, dma_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) sg_req = tegra_dma_sg_req_get(tdc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) if (!sg_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) tegra_dma_desc_put(tdc, dma_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) dma_desc->bytes_requested += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) sg_req->ch_regs.apb_ptr = apb_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) sg_req->ch_regs.ahb_ptr = mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) sg_req->ch_regs.csr = csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) sg_req->ch_regs.apb_seq = apb_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) sg_req->ch_regs.ahb_seq = ahb_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) sg_req->configured = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) sg_req->last_sg = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) sg_req->dma_desc = dma_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) sg_req->req_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) list_add_tail(&sg_req->node, &dma_desc->tx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) sg_req->last_sg = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (flags & DMA_CTRL_ACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) dma_desc->txd.flags = DMA_CTRL_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) * Make sure the requested mode does not conflict with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) * currently configured mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) if (!tdc->isr_handler) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) tdc->isr_handler = handle_once_dma_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) tdc->cyclic = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) if (tdc->cyclic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) tegra_dma_desc_put(tdc, dma_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) return &dma_desc->txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
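/*
 * Prepare a cyclic (continuous single-buffer) transfer, as used e.g. for
 * audio: the buffer is split into period_len sized requests which are
 * cycled through until the channel is terminated, with an EOC interrupt
 * raised for every completed period.
 *
 * Illustrative client-side call (the buffer and period sizes are
 * hypothetical):
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, 4 * PAGE_SIZE,
 *					 PAGE_SIZE, DMA_MEM_TO_DEV,
 *					 DMA_PREP_INTERRUPT);
 */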
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) size_t buf_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) size_t period_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) enum dma_transfer_direction direction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) struct tegra_dma_sg_req *sg_req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) u32 csr, ahb_seq, apb_ptr, apb_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) enum dma_slave_buswidth slave_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) struct tegra_dma_desc *dma_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) dma_addr_t mem = buf_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) unsigned int burst_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) size_t len, remain_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) if (!buf_len || !period_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (!tdc->config_init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) * More requests may be queued as long as the DMA has not been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * started; the driver will loop over all queued requests. Once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) * the DMA has started, new requests can be queued only after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) * terminating the DMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (tdc->busy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) dev_err(tdc2dev(tdc), "Request not allowed when DMA running\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * We only support cyclic transfers when buf_len is a multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * of period_len.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (buf_len % period_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) len = period_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if ((len & 3) || (buf_addr & 3) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) len > tdc->tdma->chip_data->max_dma_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) &burst_size, &slave_bw) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) csr |= TEGRA_APBDMA_CSR_FLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (flags & DMA_PREP_INTERRUPT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) csr |= TEGRA_APBDMA_CSR_IE_EOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) dma_desc = tegra_dma_desc_get(tdc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (!dma_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) dev_err(tdc2dev(tdc), "not enough descriptors available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) INIT_LIST_HEAD(&dma_desc->tx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) INIT_LIST_HEAD(&dma_desc->cb_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) dma_desc->cb_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) dma_desc->bytes_transferred = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) dma_desc->bytes_requested = buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) remain_len = buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) /* Split transfer equal to period size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) while (remain_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) sg_req = tegra_dma_sg_req_get(tdc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (!sg_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) tegra_dma_desc_put(tdc, dma_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) sg_req->ch_regs.apb_ptr = apb_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) sg_req->ch_regs.ahb_ptr = mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) sg_req->ch_regs.csr = csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) sg_req->ch_regs.apb_seq = apb_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) sg_req->ch_regs.ahb_seq = ahb_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) sg_req->configured = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) sg_req->last_sg = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) sg_req->dma_desc = dma_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) sg_req->req_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) list_add_tail(&sg_req->node, &dma_desc->tx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) remain_len -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) mem += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) sg_req->last_sg = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (flags & DMA_CTRL_ACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) dma_desc->txd.flags = DMA_CTRL_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) * Make sure the requested mode does not conflict with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * currently configured mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (!tdc->isr_handler) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) tdc->cyclic = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (!tdc->cyclic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) tegra_dma_desc_put(tdc, dma_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) return &dma_desc->txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) dma_cookie_init(&tdc->dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
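/*
 * Release a channel: terminate anything still running, kill the tasklet,
 * free every descriptor and sg request the channel has accumulated, and
 * invalidate the slave id so the channel can be requested again cleanly.
 */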
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) static void tegra_dma_free_chan_resources(struct dma_chan *dc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) struct tegra_dma_desc *dma_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) struct tegra_dma_sg_req *sg_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) struct list_head dma_desc_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) struct list_head sg_req_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) INIT_LIST_HEAD(&dma_desc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) INIT_LIST_HEAD(&sg_req_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) tegra_dma_terminate_all(dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) tasklet_kill(&tdc->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) list_splice_init(&tdc->pending_sg_req, &sg_req_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) list_splice_init(&tdc->free_sg_req, &sg_req_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) INIT_LIST_HEAD(&tdc->cb_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) tdc->config_init = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) tdc->isr_handler = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) while (!list_empty(&dma_desc_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) dma_desc = list_first_entry(&dma_desc_list, typeof(*dma_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) list_del(&dma_desc->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) kfree(dma_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) while (!list_empty(&sg_req_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) list_del(&sg_req->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) kfree(sg_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
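/*
 * OF translation hook: the single DMA specifier cell carries the APB
 * requestor (slave) id, which is validated against REQ_SEL_MASK and stored
 * in the allocated channel. A client would reference it from DT roughly
 * like this (node names are hypothetical):
 *
 *	dmas = <&apbdma 8>;
 *	dma-names = "tx";
 */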
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) struct of_dma *ofdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) struct tegra_dma *tdma = ofdma->of_dma_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) struct tegra_dma_channel *tdc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if (dma_spec->args[0] > TEGRA_APBDMA_CSR_REQ_SEL_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) dev_err(tdma->dev, "Invalid slave id: %u\n", dma_spec->args[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) chan = dma_get_any_slave_channel(&tdma->dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) if (!chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) tdc = to_tegra_dma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) tdc->slave_id = dma_spec->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) return chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) /* Tegra20 specific DMA controller information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) .nr_channels = 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) .channel_reg_size = 0x20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) .max_dma_count = 1024UL * 64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) .support_channel_pause = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) .support_separate_wcount_reg = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) /* Tegra30 specific DMA controller information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) .nr_channels = 32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) .channel_reg_size = 0x20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) .max_dma_count = 1024UL * 64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) .support_channel_pause = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) .support_separate_wcount_reg = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) /* Tegra114 specific DMA controller information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) .nr_channels = 32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) .channel_reg_size = 0x20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) .max_dma_count = 1024UL * 64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) .support_channel_pause = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) .support_separate_wcount_reg = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) /* Tegra148 specific DMA controller information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) .nr_channels = 32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) .channel_reg_size = 0x40,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) .max_dma_count = 1024UL * 64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) .support_channel_pause = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) .support_separate_wcount_reg = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
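/*
 * Bring the controller into a known state: assert the module reset, enable
 * the clock, release the reset after a short delay, then set the global
 * enable bit, clear the CONTROL register and write all ones to the IRQ
 * mask-set register before switching the clock back off.
 */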
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) static int tegra_dma_init_hw(struct tegra_dma *tdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) err = reset_control_assert(tdma->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) dev_err(tdma->dev, "failed to assert reset: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) err = clk_enable(tdma->dma_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) dev_err(tdma->dev, "failed to enable clk: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) /* hold the DMA controller in reset briefly, then release it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) udelay(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) reset_control_deassert(tdma->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) /* enable global DMA registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) clk_disable(tdma->dma_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) static int tegra_dma_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) const struct tegra_dma_chip_data *cdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) struct tegra_dma *tdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) cdata = of_device_get_match_data(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) size = struct_size(tdma, channels, cdata->nr_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) tdma = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) if (!tdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) tdma->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) tdma->chip_data = cdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) platform_set_drvdata(pdev, tdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) if (IS_ERR(tdma->base_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) return PTR_ERR(tdma->base_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) if (IS_ERR(tdma->dma_clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) dev_err(&pdev->dev, "Error: Missing controller clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) return PTR_ERR(tdma->dma_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) tdma->rst = devm_reset_control_get(&pdev->dev, "dma");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) if (IS_ERR(tdma->rst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) dev_err(&pdev->dev, "Error: Missing reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) return PTR_ERR(tdma->rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) spin_lock_init(&tdma->global_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) ret = clk_prepare(tdma->dma_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) ret = tegra_dma_init_hw(tdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) goto err_clk_unprepare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) pm_runtime_irq_safe(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) pm_runtime_enable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) INIT_LIST_HEAD(&tdma->dma_dev.channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) for (i = 0; i < cdata->nr_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) struct tegra_dma_channel *tdc = &tdma->channels[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) tdc->chan_addr = tdma->base_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) (i * cdata->channel_reg_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) irq = platform_get_irq(pdev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) if (irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) ret = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) goto err_pm_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) ret = devm_request_irq(&pdev->dev, irq, tegra_dma_isr, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) tdc->name, tdc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) "request_irq failed with err %d channel %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) ret, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) goto err_pm_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) tdc->dma_chan.device = &tdma->dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) dma_cookie_init(&tdc->dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) list_add_tail(&tdc->dma_chan.device_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) &tdma->dma_dev.channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) tdc->tdma = tdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) tdc->id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) tasklet_setup(&tdc->tasklet, tegra_dma_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) spin_lock_init(&tdc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) init_waitqueue_head(&tdc->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) INIT_LIST_HEAD(&tdc->pending_sg_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) INIT_LIST_HEAD(&tdc->free_sg_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) INIT_LIST_HEAD(&tdc->free_dma_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) INIT_LIST_HEAD(&tdc->cb_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) tdma->global_pause_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) tdma->dma_dev.dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) tdma->dma_dev.device_alloc_chan_resources =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) tegra_dma_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) tdma->dma_dev.device_free_chan_resources =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) tegra_dma_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) tdma->dma_dev.device_config = tegra_dma_slave_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) tdma->dma_dev.device_synchronize = tegra_dma_synchronize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) ret = dma_async_device_register(&tdma->dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) "Tegra20 APB DMA driver registration failed %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) goto err_pm_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) ret = of_dma_controller_register(pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) tegra_dma_of_xlate, tdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) "Tegra20 APB DMA OF registration failed %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) goto err_unregister_dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %u channels\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) cdata->nr_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
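/* Error unwind: undo the steps above in the reverse order they were done. */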
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) err_unregister_dma_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) dma_async_device_unregister(&tdma->dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) err_pm_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) err_clk_unprepare:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) clk_unprepare(tdma->dma_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) static int tegra_dma_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) struct tegra_dma *tdma = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
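	/* Tear down in reverse order of probe. */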
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) of_dma_controller_free(pdev->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) dma_async_device_unregister(&tdma->dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) clk_unprepare(tdma->dma_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
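/*
 * The DMA clock is prepared once at probe time (see clk_unprepare() in
 * tegra_dma_remove() above), so the runtime PM callbacks only need to
 * enable and disable it.
 */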
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) static int __maybe_unused tegra_dma_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) struct tegra_dma *tdma = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) clk_disable(tdma->dma_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) static int __maybe_unused tegra_dma_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) struct tegra_dma *tdma = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) return clk_enable(tdma->dma_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) static int __maybe_unused tegra_dma_dev_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) struct tegra_dma *tdma = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) bool busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
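	/*
	 * Quiesce every channel before the system suspends: flush pending
	 * tasklet work, then sample the busy flag under the channel lock and
	 * refuse to suspend while a transfer is still in flight.
	 */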
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) for (i = 0; i < tdma->chip_data->nr_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) struct tegra_dma_channel *tdc = &tdma->channels[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) tasklet_kill(&tdc->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) spin_lock_irqsave(&tdc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) busy = tdc->busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) spin_unlock_irqrestore(&tdc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) if (busy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) dev_err(tdma->dev, "channel %u busy\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) return pm_runtime_force_suspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) static int __maybe_unused tegra_dma_dev_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) struct tegra_dma *tdma = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
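	/*
	 * Re-program the controller's global registers, which are assumed
	 * not to survive system suspend, before runtime PM state is restored.
	 */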
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) err = tegra_dma_init_hw(tdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) return pm_runtime_force_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
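/*
 * Runtime PM gates the DMA clock on demand; system sleep funnels through the
 * force-suspend/force-resume helpers after the channel-busy check above.
 */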
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_dev_suspend, tegra_dma_dev_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
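/*
 * Each compatible entry selects per-SoC chip data, e.g. the number of
 * channels reported at probe time.
 */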
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) static const struct of_device_id tegra_dma_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) .compatible = "nvidia,tegra148-apbdma",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) .data = &tegra148_dma_chip_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) .compatible = "nvidia,tegra114-apbdma",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) .data = &tegra114_dma_chip_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) .compatible = "nvidia,tegra30-apbdma",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) .data = &tegra30_dma_chip_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) .compatible = "nvidia,tegra20-apbdma",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) .data = &tegra20_dma_chip_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) static struct platform_driver tegra_dmac_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) .name = "tegra-apbdma",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) .pm = &tegra_dma_dev_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) .of_match_table = tegra_dma_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) .probe = tegra_dma_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) .remove = tegra_dma_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) module_platform_driver(tegra_dmac_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) MODULE_LICENSE("GPL v2");