// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_data/dma-ste-dma40.h>

#include "dmaengine.h"
#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
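
/*
 * Worked example (for illustration only, not part of the original source):
 * for channel 5, D40_CHAN_POS(5) is 2 * (5 / 2) = 4 (integer division), so
 * D40_CHAN_POS_MASK(5) is 0x3 << 4 = 0x30. Channels 4 and 5 therefore share
 * the same 2-bit field in the register word.
 */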

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Milliseconds */
#define DMA40_AUTOSUSPEND_DELAY	100

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Max number of logical channels per physical channel */
#define D40_MAX_LOG_CHAN_PER_PHY 32

/* Attempts before giving up on trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE		BIT(31)
#define D40_ALLOC_PHY		BIT(30)
#define D40_ALLOC_LOG_FREE	0

#define D40_MEMCPY_MAX_CHANS	8

/* Reserved event lines for memcpy only. */
#define DB8500_DMA_MEMCPY_EV_0	51
#define DB8500_DMA_MEMCPY_EV_1	56
#define DB8500_DMA_MEMCPY_EV_2	57
#define DB8500_DMA_MEMCPY_EV_3	58
#define DB8500_DMA_MEMCPY_EV_4	59
#define DB8500_DMA_MEMCPY_EV_5	60

static int dma40_memcpy_channels[] = {
	DB8500_DMA_MEMCPY_EV_0,
	DB8500_DMA_MEMCPY_EV_1,
	DB8500_DMA_MEMCPY_EV_2,
	DB8500_DMA_MEMCPY_EV_3,
	DB8500_DMA_MEMCPY_EV_4,
	DB8500_DMA_MEMCPY_EV_5,
};

/* Default configuration for physical memcpy */
static const struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
	.mode = STEDMA40_MODE_PHYSICAL,
	.dir = DMA_MEM_TO_MEM,

	.src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.src_info.psize = STEDMA40_PSIZE_PHY_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

	.dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.dst_info.psize = STEDMA40_PSIZE_PHY_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/* Default configuration for logical memcpy */
static const struct stedma40_chan_cfg dma40_memcpy_conf_log = {
	.mode = STEDMA40_MODE_LOGICAL,
	.dir = DMA_MEM_TO_MEM,

	.src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.src_info.psize = STEDMA40_PSIZE_LOG_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

	.dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	.dst_info.psize = STEDMA40_PSIZE_LOG_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP = 0,
	D40_DMA_RUN = 1,
	D40_DMA_SUSPEND_REQ = 2,
	D40_DMA_SUSPENDED = 3
};

/*
 * enum d40_events - The different Event Enables for the event lines.
 *
 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
 * @D40_SUSPEND_REQ_EVENTLINE: Request to suspend an event line.
 * @D40_ROUND_EVENTLINE: Status check for event line.
 */
enum d40_events {
	D40_DEACTIVATE_EVENTLINE = 0,
	D40_ACTIVATE_EVENTLINE = 1,
	D40_SUSPEND_REQ_EVENTLINE = 2,
	D40_ROUND_EVENTLINE = 3
};

/*
 * These are the registers that have to be saved and later restored
 * when the DMA hw is powered off.
 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
 */
static __maybe_unused u32 d40_backup_regs[] = {
	D40_DREG_LCPA,
	D40_DREG_LCLA,
	D40_DREG_PRMSE,
	D40_DREG_PRMSO,
	D40_DREG_PRMOE,
	D40_DREG_PRMOO,
};

#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)

/*
 * since 9540 and 8540 have the same HW revision
 * use v4a for 9540 or earlier
 * use v4b for 8540 or later
 * HW revision:
 * DB8500ed has revision 0
 * DB8500v1 has revision 2
 * DB8500v2 has revision 3
 * AP9540v1 has revision 4
 * DB8540v1 has revision 4
 * TODO: Check if all these registers have to be saved/restored on dma40 v4a
 */
static u32 d40_backup_regs_v4a[] = {
	D40_DREG_PSEG1,
	D40_DREG_PSEG2,
	D40_DREG_PSEG3,
	D40_DREG_PSEG4,
	D40_DREG_PCEG1,
	D40_DREG_PCEG2,
	D40_DREG_PCEG3,
	D40_DREG_PCEG4,
	D40_DREG_RSEG1,
	D40_DREG_RSEG2,
	D40_DREG_RSEG3,
	D40_DREG_RSEG4,
	D40_DREG_RCEG1,
	D40_DREG_RCEG2,
	D40_DREG_RCEG3,
	D40_DREG_RCEG4,
};

#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)

static u32 d40_backup_regs_v4b[] = {
	D40_DREG_CPSEG1,
	D40_DREG_CPSEG2,
	D40_DREG_CPSEG3,
	D40_DREG_CPSEG4,
	D40_DREG_CPSEG5,
	D40_DREG_CPCEG1,
	D40_DREG_CPCEG2,
	D40_DREG_CPCEG3,
	D40_DREG_CPCEG4,
	D40_DREG_CPCEG5,
	D40_DREG_CRSEG1,
	D40_DREG_CRSEG2,
	D40_DREG_CRSEG3,
	D40_DREG_CRSEG4,
	D40_DREG_CRSEG5,
	D40_DREG_CRCEG1,
	D40_DREG_CRCEG2,
	D40_DREG_CRCEG3,
	D40_DREG_CRCEG4,
	D40_DREG_CRCEG5,
};

#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)

static __maybe_unused u32 d40_backup_regs_chan[] = {
	D40_CHAN_REG_SSCFG,
	D40_CHAN_REG_SSELT,
	D40_CHAN_REG_SSPTR,
	D40_CHAN_REG_SSLNK,
	D40_CHAN_REG_SDCFG,
	D40_CHAN_REG_SDELT,
	D40_CHAN_REG_SDPTR,
	D40_CHAN_REG_SDLNK,
};

#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
			     BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};

static struct d40_interrupt_lookup il_v4a[] = {
	{D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
	{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
	{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
	{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
	{D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
	{D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
	{D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
	{D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
	{D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
	{D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
};

static struct d40_interrupt_lookup il_v4b[] = {
	{D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false, 0},
	{D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
	{D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
	{D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
	{D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
	{D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true, 0},
	{D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true, 32},
	{D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true, 64},
	{D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true, 96},
	{D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true, 128},
	{D40_DREG_CPCTIS, D40_DREG_CPCICR, false, D40_PHY_CHAN},
	{D40_DREG_CPCEIS, D40_DREG_CPCICR, true, D40_PHY_CHAN},
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
	/* Clock every part of the DMA block from start */
	{ .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},

	/* Interrupts on all logical channels */
	{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
};

static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
	/* Clock every part of the DMA block from start */
	{ .reg = D40_DREG_GCC,     .val = D40_DREG_GCC_ENABLE_ALL},

	/* Interrupts on all logical channels */
	{ .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area used when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @dma_addr: DMA address, if mapped
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void *base;
	int size;
	dma_addr_t dma_addr;
	/* Space for dst and src, plus an extra for padding */
	u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used, among other things, for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir lli_log;

	struct d40_lli_pool lli_pool;
	int lli_len;
	int lli_current;
	int lcla_alloc;

	struct dma_async_tx_descriptor txd;
	struct list_head node;

	bool is_in_client_list;
	bool cyclic;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. Aligned to 256 KiB (2^18), as the
 * hardware requires (see LCLA_ALIGNMENT).
 * @dma_addr: DMA address, if mapped
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: Big map of which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
	void *base;
	dma_addr_t dma_addr;
	void *base_unaligned;
	int pages;
	spinlock_t lock;
	struct d40_desc **alloc_map;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @reserved: True if used by secure world or otherwise.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but for dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line number.
 * @use_soft_lli: To mark if the linked lists of the channel are managed by SW.
 */
struct d40_phy_res {
	spinlock_t lock;
	bool reserved;
	int num;
	u32 allocated_src;
	u32 allocated_dst;
	bool use_soft_lli;
};

struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any, of this channel.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client-owned descriptor list.
 * @pending_queue: Submitted jobs, to be issued by issue_pending()
 * @active: Active descriptor.
 * @done: Completed jobs
 * @queue: Queued jobs.
 * @prepare_queue: Prepared jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @slave_config: DMA slave configuration.
 * @configured: whether the dma_cfg configuration is valid
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t lock;
	int log_num;
	int pending_tx;
	bool busy;
	struct d40_phy_res *phy_chan;
	struct dma_chan chan;
	struct tasklet_struct tasklet;
	struct list_head client;
	struct list_head pending_queue;
	struct list_head active;
	struct list_head done;
	struct list_head queue;
	struct list_head prepare_queue;
	struct stedma40_chan_cfg dma_cfg;
	struct dma_slave_config slave_config;
	bool configured;
	struct d40_base *base;
	/* Default register configurations */
	u32 src_def_cfg;
	u32 dst_def_cfg;
	struct d40_def_lcsp log_def;
	struct d40_log_lli_full *lcpa;
	/* Runtime reconfiguration */
	dma_addr_t runtime_addr;
	enum dma_transfer_direction runtime_direction;
};

/**
 * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
 * controller
 *
 * @backup: the pointer to the registers address array for backup
 * @backup_size: the size of the registers address array for backup
 * @realtime_en: the realtime enable register
 * @realtime_clear: the realtime clear register
 * @high_prio_en: the high priority enable register
 * @high_prio_clear: the high priority clear register
 * @interrupt_en: the interrupt enable register
 * @interrupt_clear: the interrupt clear register
 * @il: the pointer to struct d40_interrupt_lookup
 * @il_size: the size of d40_interrupt_lookup array
 * @init_reg: the pointer to the struct d40_reg_val
 * @init_reg_size: the size of d40_reg_val array
 */
struct d40_gen_dmac {
	u32 *backup;
	u32 backup_size;
	u32 realtime_en;
	u32 realtime_clear;
	u32 high_prio_en;
	u32 high_prio_clear;
	u32 interrupt_en;
	u32 interrupt_clear;
	struct d40_interrupt_lookup *il;
	u32 il_size;
	struct d40_reg_val *init_reg;
	u32 init_reg_size;
};

/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's registers.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem
 * transfers).
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 * @reg_val_backup: Here the values of some hardware registers are stored
 * before the DMA is powered off. They are restored when the power is back on.
 * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
 * later.
 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
 * @regs_interrupt: Scratch space for registers during interrupt.
 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
 * @gen_dmac: the struct for generic registers values to represent u8500/8540
 * DMA controller
 */
struct d40_base {
	spinlock_t interrupt_lock;
	spinlock_t execmd_lock;
	struct device *dev;
	void __iomem *virtbase;
	u8 rev:4;
	struct clk *clk;
	phys_addr_t phy_start;
	resource_size_t phy_size;
	int irq;
	int num_memcpy_chans;
	int num_phy_chans;
	int num_log_chans;
	struct dma_device dma_both;
	struct dma_device dma_slave;
	struct dma_device dma_memcpy;
	struct d40_chan *phy_chans;
	struct d40_chan *log_chans;
	struct d40_chan **lookup_log_chans;
	struct d40_chan **lookup_phy_chans;
	struct stedma40_platform_data *plat_data;
	struct regulator *lcpa_regulator;
	/* Physical half channels */
	struct d40_phy_res *phy_res;
	struct d40_lcla_pool lcla_pool;
	void *lcpa_base;
	dma_addr_t phy_lcpa;
	resource_size_t lcpa_size;
	struct kmem_cache *desc_slab;
	u32 reg_val_backup[BACKUP_REGS_SZ];
	u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
	u32 *reg_val_backup_chan;
	u32 *regs_interrupt;
	u16 gcc_pwr_off_mask;
	struct d40_gen_dmac gen_dmac;
};

static struct device *chan2dev(struct d40_chan *d40c)
{
	return &d40c->chan.dev->device;
}

static bool chan_is_physical(struct d40_chan *chan)
{
	return chan->log_num == D40_PHY_CHAN;
}

static bool chan_is_logical(struct d40_chan *chan)
{
	return !chan_is_physical(chan);
}

static void __iomem *chan_base(struct d40_chan *chan)
{
	return chan->base->virtbase + D40_DREG_PCBASE +
	       chan->phy_chan->num * D40_DREG_PCDELTA;
}
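
/*
 * For illustration only (not part of the original source): chan_base()
 * indexes the per-channel register block. For physical channel 2 it returns
 * virtbase + D40_DREG_PCBASE + 2 * D40_DREG_PCDELTA, and the D40_CHAN_REG_*
 * offsets used further down (e.g. in d40_phy_lli_load()) are added on top of
 * that base.
 */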

#define d40_err(dev, format, arg...) \
	dev_err(dev, "[%s] " format, __func__, ## arg)

#define chan_err(d40c, format, arg...) \
	d40_err(chan2dev(d40c), format, ## arg)
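
/*
 * Usage sketch (illustrative only; the message string is hypothetical):
 *
 *	chan_err(d40c, "Failed to allocate %d LLIs\n", lli_len);
 *
 * expands to dev_err() on the channel's struct device with the calling
 * function's name prefixed in brackets, e.g. "[d40_pool_lli_alloc] ...".
 */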

static int d40_set_runtime_config_write(struct dma_chan *chan,
					struct dma_slave_config *config,
					enum dma_transfer_direction direction);

static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
			      int lli_len)
{
	bool is_log = chan_is_logical(d40c);
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = lli_len * 2 * align;

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN(base, align);
		d40d->lli_log.dst = d40d->lli_log.src + lli_len;

		d40d->lli_pool.dma_addr = 0;
	} else {
		d40d->lli_phy.src = PTR_ALIGN(base, align);
		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;

		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
							 d40d->lli_phy.src,
							 d40d->lli_pool.size,
							 DMA_TO_DEVICE);

		if (dma_mapping_error(d40c->base->dev,
				      d40d->lli_pool.dma_addr)) {
			kfree(d40d->lli_pool.base);
			d40d->lli_pool.base = NULL;
			d40d->lli_pool.dma_addr = 0;
			return -ENOMEM;
		}
	}

	return 0;
}
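
/*
 * Layout sketch (illustrative, derived from the code above): for a logical
 * channel with lli_len == 4, lli_pool.size is 4 * 2 * sizeof(struct
 * d40_log_lli); lli_log.src points at the aligned start of that buffer and
 * lli_log.dst starts 4 entries later, so the src and dst LLI arrays sit back
 * to back in a single allocation.
 */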

static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_pool.dma_addr)
		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
				 d40d->lli_pool.size, DMA_TO_DEVICE);

	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}

static int d40_lcla_alloc_one(struct d40_chan *d40c,
			      struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	/*
	 * Allocate both src and dst at the same time; therefore the halves
	 * start at 1, since 0 can't be used (zero is used as the end marker).
	 */
	for (i = 1; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

		if (!d40c->base->lcla_pool.alloc_map[idx]) {
			d40c->base->lcla_pool.alloc_map[idx] = d40d;
			d40d->lcla_alloc++;
			ret = i;
			break;
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}
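
/*
 * Indexing sketch (illustrative, derived from the loop above): each physical
 * channel owns a window of D40_LCLA_LINK_PER_EVENT_GRP (128) alloc_map slots,
 * so physical channel 3 hands out indices 3 * 128 + 1 .. 3 * 128 + 63; slot 0
 * of each window is never used because index 0 doubles as the end marker.
 */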
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) static int d40_lcla_free_all(struct d40_chan *d40c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) struct d40_desc *d40d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) if (chan_is_physical(d40c))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) d40c->base->lcla_pool.alloc_map[idx] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) d40d->lcla_alloc--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) if (d40d->lcla_alloc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) static void d40_desc_remove(struct d40_desc *d40d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) list_del(&d40d->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
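/*
 * Get a descriptor for a new transfer: reuse an acked descriptor from the
 * client list if one is available, otherwise allocate a fresh one from the
 * descriptor slab cache.
 */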
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) struct d40_desc *desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (!list_empty(&d40c->client)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) struct d40_desc *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) struct d40_desc *_d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) list_for_each_entry_safe(d, _d, &d40c->client, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) if (async_tx_test_ack(&d->txd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) d40_desc_remove(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) desc = d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) memset(desc, 0, sizeof(*desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) INIT_LIST_HEAD(&desc->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) return desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) d40_pool_lli_free(d40c, d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) d40_lcla_free_all(d40c, d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) kmem_cache_free(d40c->base->desc_slab, d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) list_add_tail(&desc->node, &d40c->active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
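/*
 * Load the first src and dst physical LLIs of a descriptor directly into
 * the channel registers.
 */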
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) struct d40_phy_lli *lli_src = desc->lli_phy.src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) void __iomem *base = chan_base(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) list_add_tail(&desc->node, &d40c->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
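/*
 * Load the logical LLIs of a descriptor into LCPA and, when LCLA entries
 * can be allocated, link the remaining ones in LCLA. desc->lli_current is
 * updated to reflect how far the list was loaded.
 */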
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) struct d40_lcla_pool *pool = &chan->base->lcla_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) struct d40_log_lli_bidir *lli = &desc->lli_log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) int lli_current = desc->lli_current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) int lli_len = desc->lli_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) bool cyclic = desc->cyclic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) int curr_lcla = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) int first_lcla = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) bool linkback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
	/*
	 * We may have partially running cyclic transfers, in case we didn't
	 * get enough LCLA entries.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) linkback = cyclic && lli_current == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * For linkback, we need one LCLA even with only one link, because we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * can't link back to the one in LCPA space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (linkback || (lli_len - lli_current > 1)) {
		/*
		 * If the channel is expected to use only soft_lli, don't
		 * allocate an LCLA. This avoids a HW issue that exists in
		 * some controllers during a peripheral-to-memory transfer
		 * that uses linked lists.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (!(chan->phy_chan->use_soft_lli &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) chan->dma_cfg.dir == DMA_DEV_TO_MEM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) curr_lcla = d40_lcla_alloc_one(chan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) first_lcla = curr_lcla;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * For linkback, we normally load the LCPA in the loop since we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * link it to the second LCLA and not the first. However, if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * couldn't even get a first LCLA, then we have to run in LCPA and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * reload manually.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (!linkback || curr_lcla == -EINVAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) unsigned int flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (curr_lcla == -EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) flags |= LLI_TERM_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) d40_log_lli_lcpa_write(chan->lcpa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) &lli->dst[lli_current],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) &lli->src[lli_current],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) curr_lcla,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) lli_current++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (curr_lcla < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) goto set_current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) for (; lli_current < lli_len; lli_current++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) unsigned int lcla_offset = chan->phy_chan->num * 1024 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) 8 * curr_lcla * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) struct d40_log_lli *lcla = pool->base + lcla_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) unsigned int flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) int next_lcla;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (lli_current + 1 < lli_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) next_lcla = d40_lcla_alloc_one(chan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) next_lcla = linkback ? first_lcla : -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (cyclic || next_lcla == -EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) flags |= LLI_TERM_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (linkback && curr_lcla == first_lcla) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) /* First link goes in both LCPA and LCLA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) d40_log_lli_lcpa_write(chan->lcpa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) &lli->dst[lli_current],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) &lli->src[lli_current],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) next_lcla, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
		/*
		 * In the cyclic case, one LCLA goes unused if the very first
		 * next_lcla allocation fails.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) d40_log_lli_lcla_write(lcla,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) &lli->dst[lli_current],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) &lli->src[lli_current],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) next_lcla, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * Cache maintenance is not needed if lcla is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * mapped in esram
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (!use_esram_lcla) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) dma_sync_single_range_for_device(chan->base->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) pool->dma_addr, lcla_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) 2 * sizeof(struct d40_log_lli),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) curr_lcla = next_lcla;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) lli_current++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) set_current:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) desc->lli_current = lli_current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (chan_is_physical(d40c)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) d40_phy_lli_load(d40c, d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) d40d->lli_current = d40d->lli_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) d40_log_lli_to_lcxa(d40c, d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) return list_first_entry_or_null(&d40c->active, struct d40_desc, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) /* remove desc from current queue and add it to the pending_queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) d40_desc_remove(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) desc->is_in_client_list = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) list_add_tail(&desc->node, &d40c->pending_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) return list_first_entry_or_null(&d40c->pending_queue, struct d40_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return list_first_entry_or_null(&d40c->queue, struct d40_desc, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) static struct d40_desc *d40_first_done(struct d40_chan *d40c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) return list_first_entry_or_null(&d40c->done, struct d40_desc, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
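/* Convert a logical or physical PSIZE configuration value to a burst size. */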
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) static int d40_psize_2_burst_size(bool is_log, int psize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (is_log) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (psize == STEDMA40_PSIZE_LOG_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (psize == STEDMA40_PSIZE_PHY_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return 2 << psize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
/*
 * The DMA only supports transmitting segments of up to
 * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is given in bytes.
 *
 * Calculate the total number of DMA elements required to send the entire
 * sg list.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) int dmalen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) u32 max_w = max(data_width1, data_width2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) u32 min_w = min(data_width1, data_width2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (seg_max > STEDMA40_MAX_SEG_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) seg_max -= max_w;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (!IS_ALIGNED(size, max_w))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (size <= seg_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) dmalen = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) dmalen = size / seg_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (dmalen * seg_max < size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) dmalen++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) return dmalen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) u32 data_width1, u32 data_width2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) int len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) for_each_sg(sgl, sg, sg_len, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) ret = d40_size_2_dmalen(sg_dma_len(sg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) data_width1, data_width2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) len += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
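/*
 * Issue a command (RUN, SUSPEND_REQ or STOP) to the physical channel through
 * the ACTIVE/ACTIVO register. A STOP is preceded by a suspend request, and a
 * suspend request is polled until the channel reports suspended or stopped.
 */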
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) static int __d40_execute_command_phy(struct d40_chan *d40c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) enum d40_command command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) void __iomem *active_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) u32 wmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (command == D40_DMA_STOP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) spin_lock_irqsave(&d40c->base->execmd_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (d40c->phy_chan->num % 2 == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) if (command == D40_DMA_SUSPEND_REQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) status = (readl(active_reg) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) D40_CHAN_POS(d40c->phy_chan->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) active_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (command == D40_DMA_SUSPEND_REQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) status = (readl(active_reg) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) D40_CHAN_POS(d40c->phy_chan->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * Reduce the number of bus accesses while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) * waiting for the DMA to suspend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) udelay(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (status == D40_DMA_STOP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) status == D40_DMA_SUSPENDED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (i == D40_SUSPEND_MAX_IT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) chan_err(d40c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) "unable to suspend the chl %d (log: %d) status %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) d40c->phy_chan->num, d40c->log_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) dump_stack();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
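/*
 * Terminate all activity on a channel: free every descriptor on the done,
 * active, queued, pending, client and prepare lists.
 */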
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) static void d40_term_all(struct d40_chan *d40c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) struct d40_desc *d40d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) struct d40_desc *_d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) /* Release completed descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) while ((d40d = d40_first_done(d40c))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) d40_desc_remove(d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) d40_desc_free(d40c, d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) /* Release active descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) while ((d40d = d40_first_active_get(d40c))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) d40_desc_remove(d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) d40_desc_free(d40c, d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) /* Release queued descriptors waiting for transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) while ((d40d = d40_first_queued(d40c))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) d40_desc_remove(d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) d40_desc_free(d40c, d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) /* Release pending descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) while ((d40d = d40_first_pending(d40c))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) d40_desc_remove(d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) d40_desc_free(d40c, d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) /* Release client owned descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) if (!list_empty(&d40c->client))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) d40_desc_remove(d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) d40_desc_free(d40c, d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) /* Release descriptors in prepare queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (!list_empty(&d40c->prepare_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) list_for_each_entry_safe(d40d, _d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) &d40c->prepare_queue, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) d40_desc_remove(d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) d40_desc_free(d40c, d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) d40c->pending_tx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
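/*
 * Activate, deactivate or request suspension of a single event line via the
 * channel's SSLNK or SDLNK register, as selected by @reg.
 */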
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) static void __d40_config_set_event(struct d40_chan *d40c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) enum d40_events event_type, u32 event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) void __iomem *addr = chan_base(d40c) + reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) int tries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) switch (event_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) case D40_DEACTIVATE_EVENTLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) | ~D40_EVENTLINE_MASK(event), addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) case D40_SUSPEND_REQ_EVENTLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) D40_EVENTLINE_POS(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) if (status == D40_DEACTIVATE_EVENTLINE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) status == D40_SUSPEND_REQ_EVENTLINE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) | ~D40_EVENTLINE_MASK(event), addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) D40_EVENTLINE_POS(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * Reduce the number of bus accesses while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * waiting for the DMA to suspend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) udelay(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (status == D40_DEACTIVATE_EVENTLINE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (tries == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				 "unable to stop the event_line chl %d (log: %d) status %x\n",
				 d40c->phy_chan->num, d40c->log_num, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) case D40_ACTIVATE_EVENTLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) * The hardware sometimes doesn't register the enable when src and dst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * event lines are active on the same logical channel. Retry to ensure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * it does. Usually only one retry is sufficient.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) tries = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) while (--tries) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) writel((D40_ACTIVATE_EVENTLINE <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) D40_EVENTLINE_POS(event)) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) ~D40_EVENTLINE_MASK(event), addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) if (readl(addr) & D40_EVENTLINE_MASK(event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (tries != 99)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) dev_dbg(chan2dev(d40c),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) "[%s] workaround enable S%cLNK (%d tries)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 100 - tries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) WARN_ON(!tries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) case D40_ROUND_EVENTLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) static void d40_config_set_event(struct d40_chan *d40c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) enum d40_events event_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) /* Enable event line connected to device (or memcpy) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) __d40_config_set_event(d40c, event_type, event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) D40_CHAN_REG_SSLNK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) __d40_config_set_event(d40c, event_type, event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) D40_CHAN_REG_SDLNK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) static u32 d40_chan_has_events(struct d40_chan *d40c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) void __iomem *chanbase = chan_base(d40c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) val = readl(chanbase + D40_CHAN_REG_SSLNK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) val |= readl(chanbase + D40_CHAN_REG_SDLNK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
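/*
 * Execute a command on a logical channel by manipulating its event lines.
 * The underlying physical channel is only stopped once no event lines remain
 * active, while D40_DMA_RUN is always forwarded to it.
 */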
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) __d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) u32 active_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) void __iomem *active_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) if (d40c->phy_chan->num % 2 == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) spin_lock_irqsave(&d40c->phy_chan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) switch (command) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) case D40_DMA_STOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) case D40_DMA_SUSPEND_REQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) active_status = (readl(active_reg) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) D40_CHAN_POS(d40c->phy_chan->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if (active_status == D40_DMA_RUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) ret = __d40_execute_command_phy(d40c, command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) case D40_DMA_RUN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) ret = __d40_execute_command_phy(d40c, command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) case D40_DMA_SUSPENDED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) static int d40_channel_execute_command(struct d40_chan *d40c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) enum d40_command command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (chan_is_logical(d40c))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) return __d40_execute_command_log(d40c, command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) return __d40_execute_command_phy(d40c, command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) static u32 d40_get_prmo(struct d40_chan *d40c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) static const unsigned int phy_map[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) [STEDMA40_PCHAN_BASIC_MODE]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) = D40_DREG_PRMO_PCHAN_BASIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) [STEDMA40_PCHAN_MODULO_MODE]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) = D40_DREG_PRMO_PCHAN_MODULO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) [STEDMA40_PCHAN_DOUBLE_DST_MODE]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) = D40_DREG_PRMO_PCHAN_DOUBLE_DST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) static const unsigned int log_map[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) [STEDMA40_LCHAN_SRC_PHY_DST_LOG]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) [STEDMA40_LCHAN_SRC_LOG_DST_PHY]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) [STEDMA40_LCHAN_SRC_LOG_DST_LOG]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) if (chan_is_physical(d40c))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) return phy_map[d40c->dma_cfg.mode_opt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) return log_map[d40c->dma_cfg.mode_opt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
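/*
 * Write the channel mode (logical or physical) and the operation mode option
 * for the channel. For logical channels, also set up the default src/dst
 * configuration and the LIDX used for LCLA, and clear the link registers.
 */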
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) static void d40_config_write(struct d40_chan *d40c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) u32 addr_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) u32 var;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) /* Odd addresses are even addresses + 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) addr_base = (d40c->phy_chan->num % 2) * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) /* Setup channel mode to logical or physical */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) var = ((u32)(chan_is_logical(d40c)) + 1) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) D40_CHAN_POS(d40c->phy_chan->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) /* Setup operational mode option register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (chan_is_logical(d40c)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) & D40_SREG_ELEM_LOG_LIDX_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) void __iomem *chanbase = chan_base(d40c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) /* Set default config for CFG reg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) /* Set LIDX for lcla */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) writel(lidx, chanbase + D40_CHAN_REG_SSELT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) writel(lidx, chanbase + D40_CHAN_REG_SDELT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) /* Clear LNK which will be used by d40_chan_has_events() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) writel(0, chanbase + D40_CHAN_REG_SSLNK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) writel(0, chanbase + D40_CHAN_REG_SDLNK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
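/*
 * Return the number of bytes left to transfer for the currently loaded LLI,
 * based on the remaining element count and the destination data width.
 */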
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) static u32 d40_residue(struct d40_chan *d40c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) u32 num_elt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) if (chan_is_logical(d40c))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) >> D40_MEM_LCSP2_ECNT_POS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) >> D40_SREG_ELEM_PHY_ECNT_POS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) return num_elt * d40c->dma_cfg.dst_info.data_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) static bool d40_tx_is_linked(struct d40_chan *d40c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) bool is_link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) if (chan_is_logical(d40c))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) & D40_SREG_LNK_PHYS_LNK_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) return is_link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) static int d40_pause(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) int res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) if (d40c->phy_chan == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) chan_err(d40c, "Channel is not allocated!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) if (!d40c->busy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) spin_lock_irqsave(&d40c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) pm_runtime_get_sync(d40c->base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) pm_runtime_mark_last_busy(d40c->base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) pm_runtime_put_autosuspend(d40c->base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) spin_unlock_irqrestore(&d40c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) static int d40_resume(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) int res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (d40c->phy_chan == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) chan_err(d40c, "Channel is not allocated!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) if (!d40c->busy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) spin_lock_irqsave(&d40c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) pm_runtime_get_sync(d40c->base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
	/* If there are bytes left to transfer or the tx is linked, resume the job */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (d40_residue(d40c) || d40_tx_is_linked(d40c))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) res = d40_channel_execute_command(d40c, D40_DMA_RUN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) pm_runtime_mark_last_busy(d40c->base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) pm_runtime_put_autosuspend(d40c->base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) spin_unlock_irqrestore(&d40c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) struct d40_chan *d40c = container_of(tx->chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) struct d40_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) dma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) spin_lock_irqsave(&d40c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) cookie = dma_cookie_assign(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) d40_desc_queue(d40c, d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) spin_unlock_irqrestore(&d40c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) return cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) static int d40_start(struct d40_chan *d40c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) return d40_channel_execute_command(d40c, D40_DMA_RUN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
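/*
 * Move the first queued descriptor (if any) to the active list, load it into
 * the hardware and start the transfer.
 */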
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) struct d40_desc *d40d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) /* Start queued jobs, if any */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) d40d = d40_first_queued(d40c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) if (d40d != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) if (!d40c->busy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) d40c->busy = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) pm_runtime_get_sync(d40c->base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) /* Remove from queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) d40_desc_remove(d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) /* Add to active queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) d40_desc_submit(d40c, d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) /* Initiate DMA job */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) d40_desc_load(d40c, d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) /* Start dma job */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) err = d40_start(d40c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) return d40d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) /* called from interrupt context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) static void dma_tc_handle(struct d40_chan *d40c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) struct d40_desc *d40d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) /* Get first active entry from list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) d40d = d40_first_active_get(d40c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) if (d40d == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) if (d40d->cyclic) {
		/*
		 * If this was a partially loaded list, we need to reload it,
		 * but only once the list has completed. We need to check for
		 * done because the interrupt will hit for every link, not
		 * just the last one.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) if (d40d->lli_current < d40d->lli_len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) && !d40_tx_is_linked(d40c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) && !d40_residue(d40c)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) d40_lcla_free_all(d40c, d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) d40_desc_load(d40c, d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) (void) d40_start(d40c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) if (d40d->lli_current == d40d->lli_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) d40d->lli_current = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) d40_lcla_free_all(d40c, d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) if (d40d->lli_current < d40d->lli_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) d40_desc_load(d40c, d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) /* Start dma job */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) (void) d40_start(d40c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (d40_queue_start(d40c) == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) d40c->busy = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) pm_runtime_mark_last_busy(d40c->base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) pm_runtime_put_autosuspend(d40c->base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) d40_desc_remove(d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) d40_desc_done(d40c, d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) d40c->pending_tx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) tasklet_schedule(&d40c->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
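/*
 * Bottom half: complete finished descriptors, invoke the client callback and
 * either free acked descriptors or park them on the client list.
 */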
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) static void dma_tasklet(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) struct d40_chan *d40c = from_tasklet(d40c, t, tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) struct d40_desc *d40d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) bool callback_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) struct dmaengine_desc_callback cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) spin_lock_irqsave(&d40c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) /* Get first entry from the done list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) d40d = d40_first_done(d40c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (d40d == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) /* Check if we have reached here for cyclic job */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) d40d = d40_first_active_get(d40c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (d40d == NULL || !d40d->cyclic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) goto check_pending_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) if (!d40d->cyclic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) dma_cookie_complete(&d40d->txd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) * If terminating a channel pending_tx is set to zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) * This prevents any finished active jobs to return to the client.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) if (d40c->pending_tx == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) spin_unlock_irqrestore(&d40c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) /* Callback to client */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) dmaengine_desc_get_callback(&d40d->txd, &cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) if (!d40d->cyclic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) if (async_tx_test_ack(&d40d->txd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) d40_desc_remove(d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) d40_desc_free(d40c, d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) } else if (!d40d->is_in_client_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) d40_desc_remove(d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) d40_lcla_free_all(d40c, d40d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) list_add_tail(&d40d->node, &d40c->client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) d40d->is_in_client_list = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) d40c->pending_tx--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) if (d40c->pending_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) tasklet_schedule(&d40c->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) spin_unlock_irqrestore(&d40c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) if (callback_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) dmaengine_desc_callback_invoke(&cb, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) check_pending_tx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) /* Rescue manouver if receiving double interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) if (d40c->pending_tx > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) d40c->pending_tx--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) spin_unlock_irqrestore(&d40c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
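/*
 * Interrupt handler: reads the interrupt status registers for both logical
 * and physical channels, acknowledges each pending source and either
 * forwards it to dma_tc_handle() or reports it as an error.
 */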
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) static irqreturn_t d40_handle_interrupt(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) u32 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) u32 row;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) long chan = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) struct d40_chan *d40c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) struct d40_base *base = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) u32 *regs = base->regs_interrupt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) struct d40_interrupt_lookup *il = base->gen_dmac.il;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) u32 il_size = base->gen_dmac.il_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) spin_lock_irqsave(&base->interrupt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) /* Read interrupt status of both logical and physical channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) for (i = 0; i < il_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) regs[i] = readl(base->virtbase + il[i].src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) chan = find_next_bit((unsigned long *)regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) BITS_PER_LONG * il_size, chan + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) /* No more set bits found? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) if (chan == BITS_PER_LONG * il_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) row = chan / BITS_PER_LONG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) idx = chan & (BITS_PER_LONG - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) if (il[row].offset == D40_PHY_CHAN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) d40c = base->lookup_phy_chans[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) d40c = base->lookup_log_chans[il[row].offset + idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) if (!d40c) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) * No error because this can happen if something else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) * in the system is using the channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) /* ACK interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) writel(BIT(idx), base->virtbase + il[row].clr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) spin_lock(&d40c->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) if (!il[row].is_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) dma_tc_handle(d40c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) chan, il[row].offset, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) spin_unlock(&d40c->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) spin_unlock_irqrestore(&base->interrupt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
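/* Sanity-check a channel configuration before it is applied. */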
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) static int d40_validate_conf(struct d40_chan *d40c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) struct stedma40_chan_cfg *conf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) int res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (!conf->dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) chan_err(d40c, "Invalid direction.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) res = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) if ((is_log && conf->dev_type > d40c->base->num_log_chans) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) (conf->dev_type < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) res = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) if (conf->dir == DMA_DEV_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) * DMAC HW supports it. Will be added to this driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) * in case any dma client requires it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) chan_err(d40c, "periph to periph not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) res = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) conf->src_info.data_width !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) conf->dst_info.data_width) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) * The DMAC hardware only supports
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) * src (burst x width) == dst (burst x width)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) res = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
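/*
 * Try to claim a physical channel, or one event line on it for a logical
 * channel, by updating the src/dst allocation masks. Returns true on
 * success; *first_user tells the caller whether the physical channel was
 * completely free before this allocation.
 */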
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) static bool d40_alloc_mask_set(struct d40_phy_res *phy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) bool is_src, int log_event_line, bool is_log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) bool *first_user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) spin_lock_irqsave(&phy->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) *first_user = ((phy->allocated_src | phy->allocated_dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) == D40_ALLOC_FREE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) if (!is_log) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) /* Physical interrupts are masked per physical full channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) if (phy->allocated_src == D40_ALLOC_FREE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) phy->allocated_dst == D40_ALLOC_FREE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) phy->allocated_dst = D40_ALLOC_PHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) phy->allocated_src = D40_ALLOC_PHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) goto found_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) goto not_found_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) /* Logical channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) if (is_src) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) if (phy->allocated_src == D40_ALLOC_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) goto not_found_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) if (phy->allocated_src == D40_ALLOC_FREE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) phy->allocated_src = D40_ALLOC_LOG_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) if (!(phy->allocated_src & BIT(log_event_line))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) phy->allocated_src |= BIT(log_event_line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) goto found_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) goto not_found_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) if (phy->allocated_dst == D40_ALLOC_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) goto not_found_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) if (phy->allocated_dst == D40_ALLOC_FREE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) phy->allocated_dst = D40_ALLOC_LOG_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) if (!(phy->allocated_dst & BIT(log_event_line))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) phy->allocated_dst |= BIT(log_event_line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) goto found_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) not_found_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) spin_unlock_irqrestore(&phy->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) found_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) spin_unlock_irqrestore(&phy->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
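/*
 * Release a physical channel, or a single event line on it. Returns true
 * when the physical channel ends up completely free.
 */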
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) int log_event_line)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) bool is_free = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) spin_lock_irqsave(&phy->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) if (!log_event_line) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) phy->allocated_dst = D40_ALLOC_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) phy->allocated_src = D40_ALLOC_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) is_free = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) /* Logical channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) if (is_src) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) phy->allocated_src &= ~BIT(log_event_line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (phy->allocated_src == D40_ALLOC_LOG_FREE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) phy->allocated_src = D40_ALLOC_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) phy->allocated_dst &= ~BIT(log_event_line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) phy->allocated_dst = D40_ALLOC_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) is_free = ((phy->allocated_src | phy->allocated_dst) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) D40_ALLOC_FREE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) spin_unlock_irqrestore(&phy->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) return is_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
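/*
 * Allocate a physical channel, or an event line on a physical channel for
 * a logical channel, according to the channel configuration (direction,
 * mode and optional fixed physical channel).
 */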
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) int dev_type = d40c->dma_cfg.dev_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) int event_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) int event_line;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) struct d40_phy_res *phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) int log_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) int num_phy_chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) bool is_src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) phys = d40c->base->phy_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) num_phy_chans = d40c->base->num_phy_chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) log_num = 2 * dev_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) is_src = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) /* dst event lines are used for logical memcpy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) log_num = 2 * dev_type + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) is_src = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) event_group = D40_TYPE_TO_GROUP(dev_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) event_line = D40_TYPE_TO_EVENT(dev_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) if (!is_log) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) /* Find physical half channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) if (d40c->dma_cfg.use_fixed_channel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) i = d40c->dma_cfg.phy_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) if (d40_alloc_mask_set(&phys[i], is_src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 0, is_log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) first_phy_user))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) goto found_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) for (i = 0; i < num_phy_chans; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) if (d40_alloc_mask_set(&phys[i], is_src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 0, is_log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) first_phy_user))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) goto found_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) int phy_num = j + event_group * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) for (i = phy_num; i < phy_num + 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) if (d40_alloc_mask_set(&phys[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) is_src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) is_log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) first_phy_user))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) goto found_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) found_phy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) d40c->phy_chan = &phys[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) d40c->log_num = D40_PHY_CHAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) if (dev_type == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) /* Find logical channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) int phy_num = j + event_group * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) if (d40c->dma_cfg.use_fixed_channel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) i = d40c->dma_cfg.phy_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) if ((i != phy_num) && (i != phy_num + 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) dev_err(chan2dev(d40c),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) "invalid fixed phy channel %d\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) if (d40_alloc_mask_set(&phys[i], is_src, event_line,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) is_log, first_phy_user))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) goto found_log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) dev_err(chan2dev(d40c),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) "could not allocate fixed phy channel %d\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) * Spread logical channels across all available physical rather
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) * than pack every logical channel at the first available phy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) * channels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) if (is_src) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) for (i = phy_num; i < phy_num + 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) if (d40_alloc_mask_set(&phys[i], is_src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) event_line, is_log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) first_phy_user))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) goto found_log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) for (i = phy_num + 1; i >= phy_num; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) if (d40_alloc_mask_set(&phys[i], is_src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) event_line, is_log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) first_phy_user))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) goto found_log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) found_log:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) d40c->phy_chan = &phys[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) d40c->log_num = log_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) if (is_log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) d40c->base->lookup_log_chans[d40c->log_num] = d40c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
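/*
 * Apply the default memcpy configuration (logical or physical, depending
 * on the channel capabilities) to a channel.
 */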
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) static int d40_config_memcpy(struct d40_chan *d40c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) dma_cap_mask_t cap = d40c->chan.device->cap_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) d40c->dma_cfg = dma40_memcpy_conf_log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) d40_log_cfg(&d40c->dma_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) } else if (dma_has_cap(DMA_MEMCPY, cap) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) dma_has_cap(DMA_SLAVE, cap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) d40c->dma_cfg = dma40_memcpy_conf_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) /* Generate interrrupt at end of transfer or relink. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) /* Generate interrupt on error. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) chan_err(d40c, "No memcpy\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988)
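/*
 * Release a channel: terminate all transfers, stop the hardware channel,
 * free the allocation masks and drop the runtime PM references.
 */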
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) static int d40_free_dma(struct d40_chan *d40c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) int res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) struct d40_phy_res *phy = d40c->phy_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) bool is_src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) /* Terminate all queued and active transfers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) d40_term_all(d40c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) if (phy == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) chan_err(d40c, "phy == null\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) if (phy->allocated_src == D40_ALLOC_FREE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) phy->allocated_dst == D40_ALLOC_FREE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) chan_err(d40c, "channel already free\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) d40c->dma_cfg.dir == DMA_MEM_TO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) is_src = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) is_src = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) chan_err(d40c, "Unknown direction\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) pm_runtime_get_sync(d40c->base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) res = d40_channel_execute_command(d40c, D40_DMA_STOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) chan_err(d40c, "stop failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) goto mark_last_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) if (chan_is_logical(d40c))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) d40c->base->lookup_log_chans[d40c->log_num] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) d40c->base->lookup_phy_chans[phy->num] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) if (d40c->busy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) pm_runtime_mark_last_busy(d40c->base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) pm_runtime_put_autosuspend(d40c->base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) d40c->busy = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) d40c->phy_chan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) d40c->configured = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) mark_last_busy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) pm_runtime_mark_last_busy(d40c->base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) pm_runtime_put_autosuspend(d40c->base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
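/*
 * Check whether a channel is currently paused by reading the hardware
 * channel state (physical) or the event line state (logical).
 */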
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) static bool d40_is_paused(struct d40_chan *d40c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) void __iomem *chanbase = chan_base(d40c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) bool is_paused = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) void __iomem *active_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) spin_lock_irqsave(&d40c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) if (chan_is_physical(d40c)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) if (d40c->phy_chan->num % 2 == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) status = (readl(active_reg) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) D40_CHAN_POS(d40c->phy_chan->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) is_paused = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) status = readl(chanbase + D40_CHAN_REG_SDLNK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) status = readl(chanbase + D40_CHAN_REG_SSLNK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) chan_err(d40c, "Unknown direction\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) status = (status & D40_EVENTLINE_MASK(event)) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) D40_EVENTLINE_POS(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) if (status != D40_DMA_RUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) is_paused = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) spin_unlock_irqrestore(&d40c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) return is_paused;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)
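/* Return the number of bytes left to transfer on the channel. */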
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) static u32 stedma40_residue(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) struct d40_chan *d40c =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) container_of(chan, struct d40_chan, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) u32 bytes_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) spin_lock_irqsave(&d40c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) bytes_left = d40_residue(d40c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) spin_unlock_irqrestore(&d40c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) return bytes_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)
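/*
 * Build the logical-channel LLI lists for a scatter-gather transfer, one
 * list for the source side and one for the destination side.
 */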
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) struct scatterlist *sg_src, struct scatterlist *sg_dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) unsigned int sg_len, dma_addr_t src_dev_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) dma_addr_t dst_dev_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) struct stedma40_half_channel_info *src_info = &cfg->src_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) ret = d40_log_sg_to_lli(sg_src, sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) src_dev_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) desc->lli_log.src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) chan->log_def.lcsp1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) src_info->data_width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) dst_info->data_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) ret = d40_log_sg_to_lli(sg_dst, sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) dst_dev_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) desc->lli_log.dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) chan->log_def.lcsp3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) dst_info->data_width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) src_info->data_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) return ret < 0 ? ret : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
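/*
 * Build the physical-channel LLI lists for a scatter-gather transfer and
 * sync them to the device. Cyclic transfers get the LLI_CYCLIC and
 * LLI_TERM_INT flags set.
 */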
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) struct scatterlist *sg_src, struct scatterlist *sg_dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) unsigned int sg_len, dma_addr_t src_dev_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) dma_addr_t dst_dev_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) struct stedma40_half_channel_info *src_info = &cfg->src_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) if (desc->cyclic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) flags |= LLI_CYCLIC | LLI_TERM_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) desc->lli_phy.src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) virt_to_phys(desc->lli_phy.src),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) chan->src_def_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) src_info, dst_info, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) desc->lli_phy.dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) virt_to_phys(desc->lli_phy.dst),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) chan->dst_def_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) dst_info, src_info, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) desc->lli_pool.size, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) return ret < 0 ? ret : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
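/*
 * Allocate and initialise a descriptor for a scatter-gather job: compute
 * the LLI length, allocate the LLI pool and set up the dmaengine tx
 * descriptor.
 */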
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) static struct d40_desc *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) unsigned int sg_len, unsigned long dma_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) struct stedma40_chan_cfg *cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) struct d40_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) desc = d40_desc_get(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) cfg = &chan->dma_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) cfg->dst_info.data_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) if (desc->lli_len < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) chan_err(chan, "Unaligned size\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) goto free_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) chan_err(chan, "Could not allocate lli\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) goto free_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) desc->lli_current = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) desc->txd.flags = dma_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) desc->txd.tx_submit = d40_tx_submit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) return desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) free_desc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) d40_desc_free(chan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
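/*
 * Common prep worker: builds either logical or physical LLIs for the job
 * and queues the descriptor on the prepare queue. A scatterlist that
 * loops back on itself is treated as a cyclic transfer.
 */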
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) struct scatterlist *sg_dst, unsigned int sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) enum dma_transfer_direction direction, unsigned long dma_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) dma_addr_t src_dev_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) dma_addr_t dst_dev_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) struct d40_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) if (!chan->phy_chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) chan_err(chan, "Cannot prepare unallocated channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) d40_set_runtime_config_write(dchan, &chan->slave_config, direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) spin_lock_irqsave(&chan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) if (desc == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) if (sg_next(&sg_src[sg_len - 1]) == sg_src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) desc->cyclic = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) src_dev_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) dst_dev_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) if (direction == DMA_DEV_TO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) src_dev_addr = chan->runtime_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) else if (direction == DMA_MEM_TO_DEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) dst_dev_addr = chan->runtime_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) if (chan_is_logical(chan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) sg_len, src_dev_addr, dst_dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) sg_len, src_dev_addr, dst_dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) chan_err(chan, "Failed to prepare %s sg job: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) chan_is_logical(chan) ? "log" : "phy", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) goto free_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) * add descriptor to the prepare queue in order to be able
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) * to free them later in terminate_all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) list_add_tail(&desc->node, &chan->prepare_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) spin_unlock_irqrestore(&chan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) return &desc->txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) free_desc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) d40_desc_free(chan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) spin_unlock_irqrestore(&chan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271)
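/*
 * dma_request_channel() filter function: validates and stores the channel
 * configuration passed by the client, or falls back to the memcpy
 * configuration when no configuration data is supplied.
 */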
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) bool stedma40_filter(struct dma_chan *chan, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) struct stedma40_chan_cfg *info = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) struct d40_chan *d40c =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) container_of(chan, struct d40_chan, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) if (data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) err = d40_validate_conf(d40c, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) d40c->dma_cfg = *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) err = d40_config_memcpy(d40c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) d40c->configured = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) return err == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) EXPORT_SYMBOL(stedma40_filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292)
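/*
 * Program the realtime and high-priority registers for one event line
 * (source or destination) of the given device type.
 */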
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) bool realtime = d40c->dma_cfg.realtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) bool highprio = d40c->dma_cfg.high_priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) u32 rtreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) u32 event = D40_TYPE_TO_EVENT(dev_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) u32 group = D40_TYPE_TO_GROUP(dev_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) u32 bit = BIT(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) u32 prioreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) * Due to a hardware bug, in some cases a logical channel triggered by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) * a high priority destination event line can generate extra packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) * transactions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) * The workaround is to not set the high priority level for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) * destination event lines that trigger logical channels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) if (!src && chan_is_logical(d40c))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) highprio = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) /* Destination event lines are stored in the upper halfword */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) if (!src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) bit <<= 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) writel(bit, d40c->base->virtbase + prioreg + group * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) writel(bit, d40c->base->virtbase + rtreg + group * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) static void d40_set_prio_realtime(struct d40_chan *d40c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) if (d40c->base->rev < 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339)
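/* Decode the flags cell of the device-tree dma specifier, see d40_xlate(). */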
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) #define D40_DT_FLAGS_MODE(flags) ((flags >> 0) & 0x1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) #define D40_DT_FLAGS_DIR(flags) ((flags >> 1) & 0x1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) #define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) #define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) #define D40_DT_FLAGS_HIGH_PRIO(flags) ((flags >> 4) & 0x1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)
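/*
 * Translate a device-tree dma specifier (device type, optional fixed
 * physical channel and flags) into a channel request.
 */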
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) struct of_dma *ofdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) struct stedma40_chan_cfg cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) dma_cap_mask_t cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) u32 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) memset(&cfg, 0, sizeof(struct stedma40_chan_cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) dma_cap_zero(cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) dma_cap_set(DMA_SLAVE, cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) cfg.dev_type = dma_spec->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) flags = dma_spec->args[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) switch (D40_DT_FLAGS_MODE(flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) switch (D40_DT_FLAGS_DIR(flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) cfg.dir = DMA_MEM_TO_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) cfg.dir = DMA_DEV_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) if (D40_DT_FLAGS_FIXED_CHAN(flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) cfg.phy_channel = dma_spec->args[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) cfg.use_fixed_channel = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) if (D40_DT_FLAGS_HIGH_PRIO(flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) cfg.high_priority = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) return dma_request_channel(cap, stedma40_filter, &cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) /* DMA ENGINE functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) static int d40_alloc_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) struct d40_chan *d40c =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) container_of(chan, struct d40_chan, chan);
	bool is_free_phy;

	spin_lock_irqsave(&d40c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) dma_cookie_init(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399)
	/* If no DMA configuration is set, use the default (memcpy) configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) if (!d40c->configured) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) err = d40_config_memcpy(d40c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) chan_err(d40c, "Failed to configure memcpy channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) goto mark_last_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) err = d40_allocate_channel(d40c, &is_free_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) chan_err(d40c, "Failed to allocate channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) d40c->configured = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) goto mark_last_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) pm_runtime_get_sync(d40c->base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) d40_set_prio_realtime(d40c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) if (chan_is_logical(d40c)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) d40c->lcpa = d40c->base->lcpa_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) d40c->lcpa = d40c->base->lcpa_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) d40c->dma_cfg.dev_type *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) /* Unmask the Global Interrupt Mask. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) chan_is_logical(d40c) ? "logical" : "physical",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) d40c->phy_chan->num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) * Only write channel configuration to the DMA if the physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) * resource is free. In case of multiple logical channels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) * on the same physical resource, only the first write is necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) if (is_free_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) d40_config_write(d40c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) mark_last_busy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) pm_runtime_mark_last_busy(d40c->base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) pm_runtime_put_autosuspend(d40c->base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) spin_unlock_irqrestore(&d40c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) static void d40_free_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) struct d40_chan *d40c =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) container_of(chan, struct d40_chan, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) if (d40c->phy_chan == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) chan_err(d40c, "Cannot free unallocated channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) spin_lock_irqsave(&d40c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) err = d40_free_dma(d40c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) chan_err(d40c, "Failed to free channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) spin_unlock_irqrestore(&d40c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474)
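/*
 * A plain memcpy is wrapped in a one-entry scatterlist per side and
 * funnelled through the common d40_prep_sg() path.
 */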
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) dma_addr_t dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) dma_addr_t src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) unsigned long dma_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) struct scatterlist dst_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) struct scatterlist src_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) sg_init_table(&dst_sg, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) sg_init_table(&src_sg, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) sg_dma_address(&dst_sg) = dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) sg_dma_address(&src_sg) = src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) sg_dma_len(&dst_sg) = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) sg_dma_len(&src_sg) = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) return d40_prep_sg(chan, &src_sg, &dst_sg, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) DMA_MEM_TO_MEM, dma_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) unsigned int sg_len, enum dma_transfer_direction direction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) unsigned long dma_flags, void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) if (!is_slave_direction(direction))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507)
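/*
 * Cyclic transfers reuse the scatter-gather path: a temporary list
 * with one entry per period is built and chained back onto itself
 * with sg_chain(), so the transfer wraps around after the last
 * period. The temporary list is freed once the descriptor is set up.
 */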
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) size_t buf_len, size_t period_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) enum dma_transfer_direction direction, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) unsigned int periods = buf_len / period_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) struct dma_async_tx_descriptor *txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) if (!sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) for (i = 0; i < periods; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) sg_dma_address(&sg[i]) = dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) sg_dma_len(&sg[i]) = period_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) dma_addr += period_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) sg_chain(sg, periods + 1, sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) txd = d40_prep_sg(chan, sg, sg, periods, direction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) DMA_PREP_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) kfree(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) return txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537)
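/*
 * Report the cookie state; for transfers still in flight the residue
 * is filled in as well, and a paused channel is reported as
 * DMA_PAUSED.
 */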
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) static enum dma_status d40_tx_status(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) dma_cookie_t cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) struct dma_tx_state *txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) enum dma_status ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) if (d40c->phy_chan == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) chan_err(d40c, "Cannot read status of unallocated channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) ret = dma_cookie_status(chan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) if (ret != DMA_COMPLETE && txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) dma_set_residue(txstate, stedma40_residue(chan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) if (d40_is_paused(d40c))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) ret = DMA_PAUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) static void d40_issue_pending(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) if (d40c->phy_chan == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) chan_err(d40c, "Channel is not allocated!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) spin_lock_irqsave(&d40c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) /* Busy means that queued jobs are already being processed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) if (!d40c->busy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) (void) d40_queue_start(d40c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) spin_unlock_irqrestore(&d40c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)
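/*
 * Stop the channel and release all active, queued and pending
 * descriptors. The runtime PM reference taken here is dropped again
 * below; when the channel was busy, the reference held for the
 * running transfer is dropped as well before the channel is marked
 * idle.
 */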
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) static int d40_terminate_all(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) if (d40c->phy_chan == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) chan_err(d40c, "Channel is not allocated!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) spin_lock_irqsave(&d40c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) pm_runtime_get_sync(d40c->base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) chan_err(d40c, "Failed to stop channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) d40_term_all(d40c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) pm_runtime_mark_last_busy(d40c->base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) pm_runtime_put_autosuspend(d40c->base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) if (d40c->busy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) pm_runtime_mark_last_busy(d40c->base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) pm_runtime_put_autosuspend(d40c->base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) d40c->busy = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) spin_unlock_irqrestore(&d40c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611)
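/*
 * Map a dmaengine maxburst value to the nearest supported DMA40
 * packet size. Logical and physical channels use different PSIZE
 * encodings, hence the two tables.
 */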
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) dma40_config_to_halfchannel(struct d40_chan *d40c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) struct stedma40_half_channel_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) u32 maxburst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) int psize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) if (chan_is_logical(d40c)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) if (maxburst >= 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) psize = STEDMA40_PSIZE_LOG_16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) else if (maxburst >= 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) psize = STEDMA40_PSIZE_LOG_8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) else if (maxburst >= 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) psize = STEDMA40_PSIZE_LOG_4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) psize = STEDMA40_PSIZE_LOG_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) if (maxburst >= 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) psize = STEDMA40_PSIZE_PHY_16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) else if (maxburst >= 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) psize = STEDMA40_PSIZE_PHY_8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) else if (maxburst >= 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) psize = STEDMA40_PSIZE_PHY_4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) psize = STEDMA40_PSIZE_PHY_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) info->psize = psize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644)
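/*
 * device_config only caches the slave configuration; it is validated
 * and written to the channel by d40_set_runtime_config_write() below,
 * once the transfer direction is known.
 */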
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) static int d40_set_runtime_config(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) struct dma_slave_config *config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) memcpy(&d40c->slave_config, config, sizeof(*config));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) /* Runtime reconfiguration extension */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) static int d40_set_runtime_config_write(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) struct dma_slave_config *config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) enum dma_transfer_direction direction)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) enum dma_slave_buswidth src_addr_width, dst_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) dma_addr_t config_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) u32 src_maxburst, dst_maxburst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) if (d40c->phy_chan == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) chan_err(d40c, "Channel is not allocated!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) src_addr_width = config->src_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) src_maxburst = config->src_maxburst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) dst_addr_width = config->dst_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) dst_maxburst = config->dst_maxburst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) if (direction == DMA_DEV_TO_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) config_addr = config->src_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) if (cfg->dir != DMA_DEV_TO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) dev_dbg(d40c->base->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) "channel was not configured for peripheral "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) "to memory transfer (%d) overriding\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) cfg->dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) cfg->dir = DMA_DEV_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) /* Configure the memory side */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) dst_addr_width = src_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) if (dst_maxburst == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) dst_maxburst = src_maxburst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) } else if (direction == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) config_addr = config->dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) if (cfg->dir != DMA_MEM_TO_DEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) dev_dbg(d40c->base->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) "channel was not configured for memory "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) "to peripheral transfer (%d) overriding\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) cfg->dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) cfg->dir = DMA_MEM_TO_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) /* Configure the memory side */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) src_addr_width = dst_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) if (src_maxburst == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) src_maxburst = dst_maxburst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) dev_err(d40c->base->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) "unrecognized channel direction %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) if (config_addr <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) dev_err(d40c->base->dev, "no address supplied\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) dev_err(d40c->base->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) src_maxburst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) src_addr_width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) dst_maxburst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) dst_addr_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) if (src_maxburst > 16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) src_maxburst = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) } else if (dst_maxburst > 16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) dst_maxburst = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737)
	/* Only valid widths are: 1, 2, 4 and 8. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) !is_power_of_2(src_addr_width) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) !is_power_of_2(dst_addr_width))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) cfg->src_info.data_width = src_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) cfg->dst_info.data_width = dst_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) src_maxburst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) dst_maxburst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) /* Fill in register values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) if (chan_is_logical(d40c))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) /* These settings will take precedence later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) d40c->runtime_addr = config_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) d40c->runtime_direction = direction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) dev_dbg(d40c->base->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) "configured channel %s for %s, data width %d/%d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) "maxburst %d/%d elements, LE, no flow control\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) dma_chan_name(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) (direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) src_addr_width, dst_addr_width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) src_maxburst, dst_maxburst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) /* Initialization functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781)
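/*
 * Initialize the slice [offset, offset + num_chans) of the channel
 * array and link each channel into the given dma_device. Every
 * channel starts out with empty descriptor lists and log_num set to
 * D40_PHY_CHAN.
 */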
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) struct d40_chan *chans, int offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) int num_chans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) struct d40_chan *d40c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) INIT_LIST_HEAD(&dma->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) for (i = offset; i < offset + num_chans; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) d40c = &chans[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) d40c->base = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) d40c->chan.device = dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) spin_lock_init(&d40c->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) d40c->log_num = D40_PHY_CHAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) INIT_LIST_HEAD(&d40c->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) INIT_LIST_HEAD(&d40c->active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) INIT_LIST_HEAD(&d40c->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) INIT_LIST_HEAD(&d40c->pending_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) INIT_LIST_HEAD(&d40c->client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) INIT_LIST_HEAD(&d40c->prepare_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) tasklet_setup(&d40c->tasklet, dma_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) list_add_tail(&d40c->chan.device_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) &dma->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) dev->device_prep_slave_sg = d40_prep_slave_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) dev->device_prep_dma_memcpy = d40_prep_memcpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) dev->directions = BIT(DMA_MEM_TO_MEM);
		/*
		 * This controller can only access addresses at even
		 * 32-bit (4-byte) boundaries.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) dev->device_alloc_chan_resources = d40_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) dev->device_free_chan_resources = d40_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) dev->device_issue_pending = d40_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) dev->device_tx_status = d40_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) dev->device_config = d40_set_runtime_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) dev->device_pause = d40_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) dev->device_resume = d40_resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) dev->device_terminate_all = d40_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) dev->dev = base->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845)
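/*
 * Three dma_device instances are registered: a slave/cyclic device on
 * the logical channels, a memcpy-only device on the channels reserved
 * for memcpy, and a combined slave/memcpy device on the reserved
 * physical channels.
 */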
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) static int __init d40_dmaengine_init(struct d40_base *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) int num_reserved_chans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) {
	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) d40_chan_init(base, &base->dma_slave, base->log_chans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 0, base->num_log_chans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) dma_cap_zero(base->dma_slave.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) d40_ops_init(base, &base->dma_slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) err = dmaenginem_async_device_register(&base->dma_slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) d40_err(base->dev, "Failed to register slave channels\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) d40_chan_init(base, &base->dma_memcpy, base->log_chans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) base->num_log_chans, base->num_memcpy_chans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) dma_cap_zero(base->dma_memcpy.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) d40_ops_init(base, &base->dma_memcpy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) err = dmaenginem_async_device_register(&base->dma_memcpy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) d40_err(base->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) "Failed to register memcpy only channels\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) d40_chan_init(base, &base->dma_both, base->phy_chans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 0, num_reserved_chans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) dma_cap_zero(base->dma_both.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
	dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) d40_ops_init(base, &base->dma_both);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) err = dmaenginem_async_device_register(&base->dma_both);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) d40_err(base->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) "Failed to register logical and physical capable channels\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) /* Suspend resume functionality */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) static int dma40_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) struct d40_base *base = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) ret = pm_runtime_force_suspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) if (base->lcpa_regulator)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) ret = regulator_disable(base->lcpa_regulator);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) static int dma40_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) struct d40_base *base = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) if (base->lcpa_regulator) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) ret = regulator_enable(base->lcpa_regulator);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) return pm_runtime_force_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) #ifdef CONFIG_PM
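/*
 * Copy a list of registers between the controller and a backup
 * buffer: with save == true the registers are read into the buffer,
 * otherwise the buffered values are written back.
 */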
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) static void dma40_backup(void __iomem *baseaddr, u32 *backup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) u32 *regaddr, int num, bool save)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) for (i = 0; i < num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) void __iomem *addr = baseaddr + regaddr[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) if (save)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) backup[i] = readl_relaxed(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) writel_relaxed(backup[i], addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) static void d40_save_restore_registers(struct d40_base *base, bool save)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) /* Save/Restore channel specific registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) for (i = 0; i < base->num_phy_chans; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) void __iomem *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) if (base->phy_res[i].reserved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) idx = i * ARRAY_SIZE(d40_backup_regs_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) dma40_backup(addr, &base->reg_val_backup_chan[idx],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) d40_backup_regs_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) ARRAY_SIZE(d40_backup_regs_chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) /* Save/Restore global registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) dma40_backup(base->virtbase, base->reg_val_backup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) /* Save/Restore registers only existing on dma40 v3 and later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) if (base->gen_dmac.backup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) dma40_backup(base->virtbase, base->reg_val_backup_v4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) base->gen_dmac.backup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) base->gen_dmac.backup_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984)
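/*
 * Runtime suspend saves the channel and global registers and, except
 * on rev 1 hardware, gates the event group clocks via GCC so that
 * only the groups with reserved channels stay clocked; runtime resume
 * restores the registers and re-enables all clocks.
 */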
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) static int dma40_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) struct d40_base *base = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) d40_save_restore_registers(base, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) /* Don't disable/enable clocks for v1 due to HW bugs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) if (base->rev != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) writel_relaxed(base->gcc_pwr_off_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) base->virtbase + D40_DREG_GCC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) static int dma40_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) struct d40_base *base = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) d40_save_restore_registers(base, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) base->virtbase + D40_DREG_GCC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) static const struct dev_pm_ops dma40_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) SET_LATE_SYSTEM_SLEEP_PM_OPS(dma40_suspend, dma40_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) SET_RUNTIME_PM_OPS(dma40_runtime_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) dma40_runtime_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) /* Initialization functions. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019)
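/*
 * Parse the PRSME/PRSMO security registers (two bits per channel,
 * even channels in PRSME, odd channels in PRSMO) and mark secure-only
 * and platform-disabled channels as permanently occupied. Returns the
 * number of physical channels left for general use.
 */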
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) static int __init d40_phy_res_init(struct d40_base *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) int num_phy_chans_avail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) u32 val[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) int odd_even_bit = -2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) int gcc = D40_DREG_GCC_ENA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) val[0] = readl(base->virtbase + D40_DREG_PRSME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) val[1] = readl(base->virtbase + D40_DREG_PRSMO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) for (i = 0; i < base->num_phy_chans; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) base->phy_res[i].num = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) odd_even_bit += 2 * ((i % 2) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security-only channels as occupied */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) base->phy_res[i].allocated_src = D40_ALLOC_PHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) base->phy_res[i].reserved = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) D40_DREG_GCC_SRC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) D40_DREG_GCC_DST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) base->phy_res[i].allocated_src = D40_ALLOC_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) base->phy_res[i].reserved = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) num_phy_chans_avail++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) spin_lock_init(&base->phy_res[i].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) /* Mark disabled channels as occupied */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) int chan = base->plat_data->disabled_channels[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) base->phy_res[chan].reserved = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) D40_DREG_GCC_SRC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) D40_DREG_GCC_DST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) num_phy_chans_avail--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) /* Mark soft_lli channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) int chan = base->plat_data->soft_lli_chans[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) base->phy_res[chan].use_soft_lli = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) dev_info(base->dev, "%d of %d physical DMA channels available\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) num_phy_chans_avail, base->num_phy_chans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) /* Verify settings extended vs standard */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) val[0] = readl(base->virtbase + D40_DREG_PRTYP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) for (i = 0; i < base->num_phy_chans; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) (val[0] & 0x3) != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) dev_info(base->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) "[%s] INFO: channel %d is misconfigured (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) __func__, i, val[0] & 0x3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) val[0] = val[0] >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091)
	/*
	 * To keep things simple, enable all clocks initially.
	 * The clocks are managed later, after channel allocation.
	 * The clocks for the event lines on which reserved channels exist
	 * are not managed here.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) base->gcc_pwr_off_mask = gcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) return num_phy_chans_avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103)
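/*
 * Probe-time hardware detection: enable the DMAC clock, map the
 * register window, verify the AMBA PrimeCell and vendor IDs plus the
 * hardware revision, and allocate the d40_base structure with all
 * channel descriptors (physical, then logical and memcpy) appended to
 * the same allocation.
 */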
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) struct clk *clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) void __iomem *virtbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) struct d40_base *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) int num_log_chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) int num_phy_chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) int num_memcpy_chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) int clk_ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) u32 pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) u32 cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) u8 rev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) clk = clk_get(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) if (IS_ERR(clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) d40_err(&pdev->dev, "No matching clock found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) goto check_prepare_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) clk_ret = clk_prepare_enable(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) if (clk_ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) goto disable_unprepare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) /* Get IO for DMAC base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) if (!res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) goto disable_unprepare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) if (request_mem_region(res->start, resource_size(res),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) D40_NAME " I/O base") == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) goto release_region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) virtbase = ioremap(res->start, resource_size(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) if (!virtbase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) goto release_region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) /* This is just a regular AMBA PrimeCell ID actually */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) for (pid = 0, i = 0; i < 4; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) & 255) << (i * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) for (cid = 0, i = 0; i < 4; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) & 255) << (i * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) if (cid != AMBA_CID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) goto unmap_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) AMBA_MANF_BITS(pid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) AMBA_VENDOR_ST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) goto unmap_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) * HW revision:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) * DB8500ed has revision 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) * ? has revision 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) * DB8500v1 has revision 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) * DB8500v2 has revision 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) * AP9540v1 has revision 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) * DB8540v1 has revision 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) rev = AMBA_REV_BITS(pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) if (rev < 2) {
		d40_err(&pdev->dev, "hardware revision: %d is not supported\n", rev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) goto unmap_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) /* The number of physical channels on this HW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) if (plat_data->num_of_phy_chans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) num_phy_chans = plat_data->num_of_phy_chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) /* The number of channels used for memcpy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) if (plat_data->num_of_memcpy_chans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) num_memcpy_chans = plat_data->num_of_memcpy_chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) dev_info(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) "hardware rev: %d @ %pa with %d physical and %d logical channels\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) rev, &res->start, num_phy_chans, num_log_chans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195)
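	/*
	 * Allocate the controller state and all channel structures in one
	 * chunk: the d40_chan array for physical, logical and memcpy
	 * channels follows right after the aligned d40_base struct.
	 */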
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) (num_phy_chans + num_log_chans + num_memcpy_chans) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) sizeof(struct d40_chan), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) if (base == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) goto unmap_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) base->rev = rev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) base->clk = clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) base->num_memcpy_chans = num_memcpy_chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) base->num_phy_chans = num_phy_chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) base->num_log_chans = num_log_chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) base->phy_start = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) base->phy_size = resource_size(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) base->virtbase = virtbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) base->plat_data = plat_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) base->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) base->log_chans = &base->phy_chans[num_phy_chans];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215)
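	/*
	 * Select the register layout: the variant with 14 physical channels
	 * uses the "v4b" register set, all others use "v4a", where register
	 * save/restore is only set up for rev 3 and later.
	 */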
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) if (base->plat_data->num_of_phy_chans == 14) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) base->gen_dmac.backup = d40_backup_regs_v4b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) base->gen_dmac.il = il_v4b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) base->gen_dmac.init_reg = dma_init_reg_v4b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) if (base->rev >= 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) base->gen_dmac.backup = d40_backup_regs_v4a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) base->gen_dmac.realtime_en = D40_DREG_RSEG1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) base->gen_dmac.il = il_v4a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) base->gen_dmac.init_reg = dma_init_reg_v4a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) base->phy_res = kcalloc(num_phy_chans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) sizeof(*base->phy_res),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) if (!base->phy_res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) goto free_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) base->lookup_phy_chans = kcalloc(num_phy_chans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) sizeof(*base->lookup_phy_chans),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) if (!base->lookup_phy_chans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) goto free_phy_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) base->lookup_log_chans = kcalloc(num_log_chans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) sizeof(*base->lookup_log_chans),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) if (!base->lookup_log_chans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) goto free_phy_chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263)
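	/*
	 * Per-channel register save area: one d40_backup_regs_chan sized
	 * slot per physical channel, used when saving and restoring channel
	 * state over power transitions.
	 */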
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) base->reg_val_backup_chan = kmalloc_array(base->num_phy_chans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) sizeof(d40_backup_regs_chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) if (!base->reg_val_backup_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) goto free_log_chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269)
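	/*
	 * Allocation map for the LCLA link area:
	 * D40_LCLA_LINK_PER_EVENT_GRP slots per physical channel.
	 */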
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) base->lcla_pool.alloc_map = kcalloc(num_phy_chans
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) * D40_LCLA_LINK_PER_EVENT_GRP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) sizeof(*base->lcla_pool.alloc_map),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) if (!base->lcla_pool.alloc_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) goto free_backup_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276)
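	/*
	 * Scratch array used when servicing interrupts: one slot per entry
	 * in the il[] interrupt lookup table selected above.
	 */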
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) base->regs_interrupt = kmalloc_array(base->gen_dmac.il_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) sizeof(*base->regs_interrupt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) if (!base->regs_interrupt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) goto free_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 0, SLAB_HWCACHE_ALIGN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) if (base->desc_slab == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) goto free_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) return base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) free_regs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) kfree(base->regs_interrupt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) free_map:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) kfree(base->lcla_pool.alloc_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) free_backup_chan:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) kfree(base->reg_val_backup_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) free_log_chans:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) kfree(base->lookup_log_chans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) free_phy_chans:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) kfree(base->lookup_phy_chans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) free_phy_res:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) kfree(base->phy_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) free_base:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) kfree(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) unmap_io:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) iounmap(virtbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) release_region:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) release_mem_region(res->start, resource_size(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) check_prepare_enabled:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) if (!clk_ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) disable_unprepare:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) clk_disable_unprepare(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) if (!IS_ERR(clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) clk_put(clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) static void __init d40_hw_init(struct d40_base *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) u32 prmseo[2] = {0, 0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) u32 pcmis = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) u32 pcicr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) u32 reg_size = base->gen_dmac.init_reg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) for (i = 0; i < reg_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) writel(dma_init_reg[i].val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) base->virtbase + dma_init_reg[i].reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) /* Configure all our dma channels to default settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) for (i = 0; i < base->num_phy_chans; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) activeo[i % 2] = activeo[i % 2] << 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) == D40_ALLOC_PHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) activeo[i % 2] |= 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343)
		/* Enable the interrupt for this physical channel */
		pcmis = (pcmis << 1) | 1;

		/* Clear any pending interrupt on this physical channel */
		pcicr = (pcicr << 1) | 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) /* Set channel to physical mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) prmseo[i % 2] = prmseo[i % 2] << 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) prmseo[i % 2] |= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355)
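	/*
	 * Write out the accumulated two-bit-per-channel mode and activation
	 * settings; each value is split over the "E" and "O" halves of the
	 * PRMS and ACTIV register pairs.
	 */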
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) /* Write which interrupt to enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) /* Write which interrupt to clear */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) /* These are __initdata and cannot be accessed after init */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) base->gen_dmac.init_reg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) base->gen_dmac.init_reg_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) static int __init d40_lcla_allocate(struct d40_base *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) struct d40_lcla_pool *pool = &base->lcla_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) unsigned long *page_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378)
	/*
	 * This is somewhat ugly. We need 1 KiB of LCLA space per physical
	 * channel, aligned on an 18-bit (256 KiB) boundary. To fulfill this
	 * hardware requirement without wasting 256 KiB we allocate pages
	 * until we get a suitably aligned one.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) page_list = kmalloc_array(MAX_LCLA_ALLOC_ATTEMPTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) sizeof(*page_list),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) if (!page_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389)
	/* Calculate how many pages are required */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) page_list[i] = __get_free_pages(GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) base->lcla_pool.pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) if (!page_list[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) d40_err(base->dev, "Failed to allocate %d pages.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) base->lcla_pool.pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) for (j = 0; j < i; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) free_pages(page_list[j], base->lcla_pool.pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) goto free_page_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) if ((virt_to_phys((void *)page_list[i]) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) (LCLA_ALIGNMENT - 1)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411)
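	/* Free all the attempts that did not yield a correctly aligned block */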
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) for (j = 0; j < i; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) free_pages(page_list[j], base->lcla_pool.pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) base->lcla_pool.base = (void *)page_list[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) } else {
		/*
		 * After many attempts with no success in finding the correct
		 * alignment, fall back to allocating a big buffer and
		 * aligning a pointer within it.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) dev_warn(base->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) "[%s] Failed to get %d pages @ 18 bit align.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) __func__, base->lcla_pool.pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) base->num_phy_chans +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) LCLA_ALIGNMENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) if (!base->lcla_pool.base_unaligned) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) goto free_page_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) LCLA_ALIGNMENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437)
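	/*
	 * Map the LCLA area for the controller. The CPU only writes link
	 * descriptors here, so a DMA_TO_DEVICE mapping is used.
	 */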
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) pool->dma_addr = dma_map_single(base->dev, pool->base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) SZ_1K * base->num_phy_chans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) if (dma_mapping_error(base->dev, pool->dma_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) pool->dma_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) goto free_page_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) writel(virt_to_phys(base->lcla_pool.base),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) base->virtbase + D40_DREG_LCLA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) free_page_list:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) kfree(page_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) static int __init d40_of_probe(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) struct device_node *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) struct stedma40_platform_data *pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) int num_phy = 0, num_memcpy = 0, num_disabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) const __be32 *list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) if (!pdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) /* If absent this value will be obtained from h/w. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) of_property_read_u32(np, "dma-channels", &num_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) if (num_phy > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) pdata->num_of_phy_chans = num_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470)
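	/* of_get_property() returns the length in bytes: convert to cells. */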
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) list = of_get_property(np, "memcpy-channels", &num_memcpy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) num_memcpy /= sizeof(*list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) d40_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) "Invalid number of memcpy channels specified (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) num_memcpy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) pdata->num_of_memcpy_chans = num_memcpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) of_property_read_u32_array(np, "memcpy-channels",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) dma40_memcpy_channels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) num_memcpy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) list = of_get_property(np, "disabled-channels", &num_disabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) num_disabled /= sizeof(*list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) d40_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) "Invalid number of disabled channels specified (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) num_disabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) of_property_read_u32_array(np, "disabled-channels",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) pdata->disabled_channels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) num_disabled);
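	/* Terminate the list with -1 so consumers know where it ends */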
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) pdata->disabled_channels[num_disabled] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) pdev->dev.platform_data = pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) static int __init d40_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) struct device_node *np = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) int ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) struct d40_base *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) int num_reserved_chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) if (!plat_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) if (np) {
			ret = d40_of_probe(pdev, np);
			if (ret)
				goto report_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) goto report_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) base = d40_hw_detect_init(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) if (!base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) goto report_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) num_reserved_chans = d40_phy_res_init(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) platform_set_drvdata(pdev, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) spin_lock_init(&base->interrupt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) spin_lock_init(&base->execmd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) /* Get IO for logical channel parameter address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) if (!res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) goto destroy_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) base->lcpa_size = resource_size(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) base->phy_lcpa = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) if (request_mem_region(res->start, resource_size(res),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) D40_NAME " I/O lcpa") == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) goto destroy_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) /* We make use of ESRAM memory for this. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) val = readl(base->virtbase + D40_DREG_LCPA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) if (res->start != val && val != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) dev_warn(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) "[%s] Mismatch LCPA dma 0x%x, def %pa\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) __func__, val, &res->start);
	} else {
		writel(res->start, base->virtbase + D40_DREG_LCPA);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) base->lcpa_base = ioremap(res->start, resource_size(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) if (!base->lcpa_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) goto destroy_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) }
	/* If the LCLA has to be located in ESRAM, we don't need to allocate it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) if (base->plat_data->use_esram_lcla) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) "lcla_esram");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) if (!res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) d40_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) "No \"lcla_esram\" memory resource\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) goto destroy_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) base->lcla_pool.base = ioremap(res->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) resource_size(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) if (!base->lcla_pool.base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) goto destroy_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) writel(res->start, base->virtbase + D40_DREG_LCLA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) ret = d40_lcla_allocate(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) goto destroy_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) spin_lock_init(&base->lcla_pool.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599)
	base->irq = platform_get_irq(pdev, 0);
	if (base->irq < 0) {
		ret = base->irq;
		goto destroy_cache;
	}

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
	if (ret) {
		d40_err(&pdev->dev, "Failed to request IRQ %d\n", base->irq);
		goto destroy_cache;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) if (base->plat_data->use_esram_lcla) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) if (IS_ERR(base->lcpa_regulator)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) ret = PTR_ERR(base->lcpa_regulator);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) base->lcpa_regulator = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) goto destroy_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) ret = regulator_enable(base->lcpa_regulator);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) d40_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) "Failed to enable lcpa_regulator\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) regulator_put(base->lcpa_regulator);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) base->lcpa_regulator = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) goto destroy_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627)
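	/* Enable all clocks in the global clock control register before use */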
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) writel_relaxed(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629)
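	/*
	 * Runtime PM may be exercised from atomic context, so mark the
	 * device IRQ safe and let it autosuspend after
	 * DMA40_AUTOSUSPEND_DELAY milliseconds of inactivity.
	 */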
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) pm_runtime_irq_safe(base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) pm_runtime_use_autosuspend(base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) pm_runtime_mark_last_busy(base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) pm_runtime_set_active(base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) pm_runtime_enable(base->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) ret = d40_dmaengine_init(base, num_reserved_chans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) goto destroy_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) d40_err(&pdev->dev, "Failed to set dma max seg size\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) goto destroy_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) d40_hw_init(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) if (np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) ret = of_dma_controller_register(np, d40_xlate, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) "could not register of_dma_controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) dev_info(base->dev, "initialized\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) destroy_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) kmem_cache_destroy(base->desc_slab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) if (base->virtbase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) iounmap(base->virtbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) iounmap(base->lcla_pool.base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) base->lcla_pool.base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) if (base->lcla_pool.dma_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) SZ_1K * base->num_phy_chans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) free_pages((unsigned long)base->lcla_pool.base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) base->lcla_pool.pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) kfree(base->lcla_pool.base_unaligned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) if (base->lcpa_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) iounmap(base->lcpa_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) if (base->phy_lcpa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) release_mem_region(base->phy_lcpa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) base->lcpa_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) if (base->phy_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) release_mem_region(base->phy_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) base->phy_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) if (base->clk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) clk_disable_unprepare(base->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) clk_put(base->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) if (base->lcpa_regulator) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) regulator_disable(base->lcpa_regulator);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) regulator_put(base->lcpa_regulator);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) kfree(base->lcla_pool.alloc_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) kfree(base->lookup_log_chans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) kfree(base->lookup_phy_chans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) kfree(base->phy_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) kfree(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) report_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) d40_err(&pdev->dev, "probe failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) static const struct of_device_id d40_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) { .compatible = "stericsson,dma40", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) static struct platform_driver d40_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) .name = D40_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) .pm = &dma40_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) .of_match_table = d40_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720)
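/*
 * Register at subsys_initcall time so the DMA controller is up before the
 * client drivers that depend on it start probing.
 */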
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) static int __init stedma40_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) return platform_driver_probe(&d40_driver, d40_probe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) subsys_initcall(stedma40_init);