// SPDX-License-Identifier: GPL-2.0+
//
// Actions Semi Owl SoCs DMA driver
//
// Copyright (c) 2014 Actions Semi Inc.
// Author: David Liu <liuwei@actions-semi.com>
//
// Copyright (c) 2018 Linaro Ltd.
// Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/slab.h>
#include "virt-dma.h"

#define OWL_DMA_FRAME_MAX_LENGTH		0xfffff

/* Global DMA Controller Registers */
#define OWL_DMA_IRQ_PD0				0x00
#define OWL_DMA_IRQ_PD1				0x04
#define OWL_DMA_IRQ_PD2				0x08
#define OWL_DMA_IRQ_PD3				0x0C
#define OWL_DMA_IRQ_EN0				0x10
#define OWL_DMA_IRQ_EN1				0x14
#define OWL_DMA_IRQ_EN2				0x18
#define OWL_DMA_IRQ_EN3				0x1C
#define OWL_DMA_SECURE_ACCESS_CTL		0x20
#define OWL_DMA_NIC_QOS				0x24
#define OWL_DMA_DBGSEL				0x28
#define OWL_DMA_IDLE_STAT			0x2C

/* Channel Registers */
#define OWL_DMA_CHAN_BASE(i)			(0x100 + (i) * 0x100)
#define OWL_DMAX_MODE				0x00
#define OWL_DMAX_SOURCE				0x04
#define OWL_DMAX_DESTINATION			0x08
#define OWL_DMAX_FRAME_LEN			0x0C
#define OWL_DMAX_FRAME_CNT			0x10
#define OWL_DMAX_REMAIN_FRAME_CNT		0x14
#define OWL_DMAX_REMAIN_CNT			0x18
#define OWL_DMAX_SOURCE_STRIDE			0x1C
#define OWL_DMAX_DESTINATION_STRIDE		0x20
#define OWL_DMAX_START				0x24
#define OWL_DMAX_PAUSE				0x28
#define OWL_DMAX_CHAINED_CTL			0x2C
#define OWL_DMAX_CONSTANT			0x30
#define OWL_DMAX_LINKLIST_CTL			0x34
#define OWL_DMAX_NEXT_DESCRIPTOR		0x38
#define OWL_DMAX_CURRENT_DESCRIPTOR_NUM		0x3C
#define OWL_DMAX_INT_CTL			0x40
#define OWL_DMAX_INT_STATUS			0x44
#define OWL_DMAX_CURRENT_SOURCE_POINTER		0x48
#define OWL_DMAX_CURRENT_DESTINATION_POINTER	0x4C
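
/*
 * Each physical channel 'i' exposes the OWL_DMAX_* registers above in a
 * 0x100-byte window at OWL_DMA_CHAN_BASE(i); pchan->base is expected to
 * point at that window, so pchan_readl()/pchan_writel() below take the
 * OWL_DMAX_* offsets directly.
 */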

/* OWL_DMAX_MODE Bits */
#define OWL_DMA_MODE_TS(x)			(((x) & GENMASK(5, 0)) << 0)
#define OWL_DMA_MODE_ST(x)			(((x) & GENMASK(1, 0)) << 8)
#define OWL_DMA_MODE_ST_DEV			OWL_DMA_MODE_ST(0)
#define OWL_DMA_MODE_ST_DCU			OWL_DMA_MODE_ST(2)
#define OWL_DMA_MODE_ST_SRAM			OWL_DMA_MODE_ST(3)
#define OWL_DMA_MODE_DT(x)			(((x) & GENMASK(1, 0)) << 10)
#define OWL_DMA_MODE_DT_DEV			OWL_DMA_MODE_DT(0)
#define OWL_DMA_MODE_DT_DCU			OWL_DMA_MODE_DT(2)
#define OWL_DMA_MODE_DT_SRAM			OWL_DMA_MODE_DT(3)
#define OWL_DMA_MODE_SAM(x)			(((x) & GENMASK(1, 0)) << 16)
#define OWL_DMA_MODE_SAM_CONST			OWL_DMA_MODE_SAM(0)
#define OWL_DMA_MODE_SAM_INC			OWL_DMA_MODE_SAM(1)
#define OWL_DMA_MODE_SAM_STRIDE			OWL_DMA_MODE_SAM(2)
#define OWL_DMA_MODE_DAM(x)			(((x) & GENMASK(1, 0)) << 18)
#define OWL_DMA_MODE_DAM_CONST			OWL_DMA_MODE_DAM(0)
#define OWL_DMA_MODE_DAM_INC			OWL_DMA_MODE_DAM(1)
#define OWL_DMA_MODE_DAM_STRIDE			OWL_DMA_MODE_DAM(2)
#define OWL_DMA_MODE_PW(x)			(((x) & GENMASK(2, 0)) << 20)
#define OWL_DMA_MODE_CB				BIT(23)
#define OWL_DMA_MODE_NDDBW(x)			(((x) & 0x1) << 28)
#define OWL_DMA_MODE_NDDBW_32BIT		OWL_DMA_MODE_NDDBW(0)
#define OWL_DMA_MODE_NDDBW_8BIT			OWL_DMA_MODE_NDDBW(1)
#define OWL_DMA_MODE_CFE			BIT(29)
#define OWL_DMA_MODE_LME			BIT(30)
#define OWL_DMA_MODE_CME			BIT(31)
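
/*
 * Example (see owl_dma_cfg_lli() below): a memory-to-memory transfer uses
 * OWL_DMA_MODE_TS(0) | OWL_DMA_MODE_ST_DCU | OWL_DMA_MODE_DT_DCU |
 * OWL_DMA_MODE_SAM_INC | OWL_DMA_MODE_DAM_INC, i.e. both endpoints are the
 * DCU with incrementing source and destination addresses.
 */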

/* OWL_DMAX_LINKLIST_CTL Bits */
#define OWL_DMA_LLC_SAV(x)			(((x) & GENMASK(1, 0)) << 8)
#define OWL_DMA_LLC_SAV_INC			OWL_DMA_LLC_SAV(0)
#define OWL_DMA_LLC_SAV_LOAD_NEXT		OWL_DMA_LLC_SAV(1)
#define OWL_DMA_LLC_SAV_LOAD_PREV		OWL_DMA_LLC_SAV(2)
#define OWL_DMA_LLC_DAV(x)			(((x) & GENMASK(1, 0)) << 10)
#define OWL_DMA_LLC_DAV_INC			OWL_DMA_LLC_DAV(0)
#define OWL_DMA_LLC_DAV_LOAD_NEXT		OWL_DMA_LLC_DAV(1)
#define OWL_DMA_LLC_DAV_LOAD_PREV		OWL_DMA_LLC_DAV(2)
#define OWL_DMA_LLC_SUSPEND			BIT(16)

/* OWL_DMAX_INT_CTL Bits */
#define OWL_DMA_INTCTL_BLOCK			BIT(0)
#define OWL_DMA_INTCTL_SUPER_BLOCK		BIT(1)
#define OWL_DMA_INTCTL_FRAME			BIT(2)
#define OWL_DMA_INTCTL_HALF_FRAME		BIT(3)
#define OWL_DMA_INTCTL_LAST_FRAME		BIT(4)

/* OWL_DMAX_INT_STATUS Bits */
#define OWL_DMA_INTSTAT_BLOCK			BIT(0)
#define OWL_DMA_INTSTAT_SUPER_BLOCK		BIT(1)
#define OWL_DMA_INTSTAT_FRAME			BIT(2)
#define OWL_DMA_INTSTAT_HALF_FRAME		BIT(3)
#define OWL_DMA_INTSTAT_LAST_FRAME		BIT(4)
/* Extract the 'width'-bit field at 'shift' in 'val' and place it at 'newshift' */
#define BIT_FIELD(val, width, shift, newshift)	\
		((((val) >> (shift)) & ((BIT(width)) - 1)) << (newshift))
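
/*
 * For example, BIT_FIELD(0x30000, 2, 16, 20) extracts the 2-bit field at
 * bit 16 (value 0x3) and places it at bit 20, yielding 0x300000.
 */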

/* The frame count value is fixed at 1 */
#define FCNT_VAL				0x1

/**
 * enum owl_dmadesc_offsets - Word offsets within the hardware DMA
 * descriptor (link-list entry) used for a transfer
 * @OWL_DMADESC_NEXT_LLI: physical address of the next link list
 * @OWL_DMADESC_SADDR: source physical address
 * @OWL_DMADESC_DADDR: destination physical address
 * @OWL_DMADESC_FLEN: frame length
 * @OWL_DMADESC_SRC_STRIDE: source stride
 * @OWL_DMADESC_DST_STRIDE: destination stride
 * @OWL_DMADESC_CTRLA: dma_mode and linklist ctrl config
 * @OWL_DMADESC_CTRLB: interrupt config
 * @OWL_DMADESC_CONST_NUM: data for constant fill
 * @OWL_DMADESC_SIZE: number of 32-bit words in the descriptor
 */
enum owl_dmadesc_offsets {
	OWL_DMADESC_NEXT_LLI = 0,
	OWL_DMADESC_SADDR,
	OWL_DMADESC_DADDR,
	OWL_DMADESC_FLEN,
	OWL_DMADESC_SRC_STRIDE,
	OWL_DMADESC_DST_STRIDE,
	OWL_DMADESC_CTRLA,
	OWL_DMADESC_CTRLB,
	OWL_DMADESC_CONST_NUM,
	OWL_DMADESC_SIZE
};
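
/*
 * A hardware link-list entry is thus OWL_DMADESC_SIZE consecutive 32-bit
 * words (struct owl_dma_lli::hw below), allocated from the dma_pool and
 * chained through the OWL_DMADESC_NEXT_LLI word.
 */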

enum owl_dma_id {
	S900_DMA,
	S700_DMA,
};

/**
 * struct owl_dma_lli - Link list for dma transfer
 * @hw: hardware link list
 * @phys: physical address of hardware link list
 * @node: node for txd's lli_list
 */
struct owl_dma_lli {
	u32 hw[OWL_DMADESC_SIZE];
	dma_addr_t phys;
	struct list_head node;
};

/**
 * struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @lli_list: link list of lli nodes
 * @cyclic: flag to indicate cyclic transfers
 */
struct owl_dma_txd {
	struct virt_dma_desc vd;
	struct list_head lli_list;
	bool cyclic;
};

/**
 * struct owl_dma_pchan - Holder for the physical channels
 * @id: physical index of this channel
 * @base: virtual memory base for the dma channel
 * @vchan: the virtual channel currently being served by this physical channel
 */
struct owl_dma_pchan {
	u32 id;
	void __iomem *base;
	struct owl_dma_vchan *vchan;
};

/**
 * struct owl_dma_vchan - Wrapper for DMA ENGINE channel
 * @vc: wrapped virtual channel
 * @pchan: the physical channel utilized by this channel
 * @txd: active transaction on this channel
 * @cfg: slave configuration for this channel
 * @drq: physical DMA request ID for this channel
 */
struct owl_dma_vchan {
	struct virt_dma_chan vc;
	struct owl_dma_pchan *pchan;
	struct owl_dma_txd *txd;
	struct dma_slave_config cfg;
	u8 drq;
};

/**
 * struct owl_dma - Holder for the Owl DMA controller
 * @dma: dma engine for this instance
 * @base: virtual memory base for the DMA controller
 * @clk: clock for the DMA controller
 * @lock: a lock to use when changing the DMA controller's global registers
 * @lli_pool: a pool for the LLI descriptors
 * @irq: interrupt ID for the DMA controller
 * @nr_pchans: the number of physical channels
 * @pchans: array of data for the physical channels
 * @nr_vchans: the number of virtual channels
 * @vchans: array of data for the virtual channels
 * @devid: device id based on OWL SoC
 */
struct owl_dma {
	struct dma_device dma;
	void __iomem *base;
	struct clk *clk;
	spinlock_t lock;
	struct dma_pool *lli_pool;
	int irq;

	unsigned int nr_pchans;
	struct owl_dma_pchan *pchans;

	unsigned int nr_vchans;
	struct owl_dma_vchan *vchans;
	enum owl_dma_id devid;
};

static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
			 u32 val, bool state)
{
	u32 regval;

	regval = readl(pchan->base + reg);

	if (state)
		regval |= val;
	else
		regval &= ~val;

	/* Write back the read-modify-write result */
	writel(regval, pchan->base + reg);
}

static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
{
	writel(data, pchan->base + reg);
}

static u32 pchan_readl(struct owl_dma_pchan *pchan, u32 reg)
{
	return readl(pchan->base + reg);
}

static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
{
	u32 regval;

	regval = readl(od->base + reg);

	if (state)
		regval |= val;
	else
		regval &= ~val;

	/* Write back the read-modify-write result */
	writel(regval, od->base + reg);
}

static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
{
	writel(data, od->base + reg);
}

static u32 dma_readl(struct owl_dma *od, u32 reg)
{
	return readl(od->base + reg);
}

static inline struct owl_dma *to_owl_dma(struct dma_device *dd)
{
	return container_of(dd, struct owl_dma, dma);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct owl_dma_vchan *to_owl_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct owl_dma_vchan, vc.chan);
}

static inline struct owl_dma_txd *to_owl_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct owl_dma_txd, vd.tx);
}

static inline u32 llc_hw_ctrla(u32 mode, u32 llc_ctl)
{
	u32 ctl;

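	/*
	 * Repack the OWL_DMAX_MODE and OWL_DMAX_LINKLIST_CTL fields into the
	 * single ctrla descriptor word: mode bits 28..31 (NDDBW/CFE/LME/CME)
	 * stay in place, SAM/DAM/PW/CB (mode bits 16..23) move to bits 20..27,
	 * ST/DT (mode bits 8..11) to bits 16..19, TS (mode bits 0..5) to
	 * bits 10..15, and the link-list DAV/SAV fields to bits 8..9 and
	 * 6..7 respectively.
	 */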
	ctl = BIT_FIELD(mode, 4, 28, 28) |
	      BIT_FIELD(mode, 8, 16, 20) |
	      BIT_FIELD(mode, 4, 8, 16) |
	      BIT_FIELD(mode, 6, 0, 10) |
	      BIT_FIELD(llc_ctl, 2, 10, 8) |
	      BIT_FIELD(llc_ctl, 2, 8, 6);

	return ctl;
}

static inline u32 llc_hw_ctrlb(u32 int_ctl)
{
	u32 ctl;

	/*
	 * Irrespective of the SoC, ctrlb value starts filling from
	 * bit 18.
	 */
	ctl = BIT_FIELD(int_ctl, 7, 0, 18);

	return ctl;
}

static u32 llc_hw_flen(struct owl_dma_lli *lli)
{
	return lli->hw[OWL_DMADESC_FLEN] & GENMASK(19, 0);
}

static void owl_dma_free_lli(struct owl_dma *od,
			     struct owl_dma_lli *lli)
{
	list_del(&lli->node);
	dma_pool_free(od->lli_pool, lli, lli->phys);
}

static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od)
{
	struct owl_dma_lli *lli;
	dma_addr_t phys;

	lli = dma_pool_alloc(od->lli_pool, GFP_NOWAIT, &phys);
	if (!lli)
		return NULL;

	INIT_LIST_HEAD(&lli->node);
	lli->phys = phys;

	return lli;
}

static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
					   struct owl_dma_lli *prev,
					   struct owl_dma_lli *next,
					   bool is_cyclic)
{
	if (!is_cyclic)
		list_add_tail(&next->node, &txd->lli_list);

	if (prev) {
		prev->hw[OWL_DMADESC_NEXT_LLI] = next->phys;
		prev->hw[OWL_DMADESC_CTRLA] |=
			llc_hw_ctrla(OWL_DMA_MODE_LME, 0);
	}

	return next;
}

static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
				  struct owl_dma_lli *lli,
				  dma_addr_t src, dma_addr_t dst,
				  u32 len, enum dma_transfer_direction dir,
				  struct dma_slave_config *sconfig,
				  bool is_cyclic)
{
	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
	u32 mode, ctrlb;

	mode = OWL_DMA_MODE_PW(0);

	switch (dir) {
	case DMA_MEM_TO_MEM:
		mode |= OWL_DMA_MODE_TS(0) | OWL_DMA_MODE_ST_DCU |
			OWL_DMA_MODE_DT_DCU | OWL_DMA_MODE_SAM_INC |
			OWL_DMA_MODE_DAM_INC;

		break;
	case DMA_MEM_TO_DEV:
		mode |= OWL_DMA_MODE_TS(vchan->drq)
			| OWL_DMA_MODE_ST_DCU | OWL_DMA_MODE_DT_DEV
			| OWL_DMA_MODE_SAM_INC | OWL_DMA_MODE_DAM_CONST;

		/*
		 * Hardware only supports 32bit and 8bit buswidth. Since the
		 * default is 32bit, select 8bit only when requested.
		 */
		if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			mode |= OWL_DMA_MODE_NDDBW_8BIT;

		break;
	case DMA_DEV_TO_MEM:
		mode |= OWL_DMA_MODE_TS(vchan->drq)
			| OWL_DMA_MODE_ST_DEV | OWL_DMA_MODE_DT_DCU
			| OWL_DMA_MODE_SAM_CONST | OWL_DMA_MODE_DAM_INC;

		/*
		 * Hardware only supports 32bit and 8bit buswidth. Since the
		 * default is 32bit, select 8bit only when requested.
		 */
		if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			mode |= OWL_DMA_MODE_NDDBW_8BIT;

		break;
	default:
		return -EINVAL;
	}

	lli->hw[OWL_DMADESC_CTRLA] = llc_hw_ctrla(mode,
						  OWL_DMA_LLC_SAV_LOAD_NEXT |
						  OWL_DMA_LLC_DAV_LOAD_NEXT);

	if (is_cyclic)
		ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_BLOCK);
	else
		ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);

	lli->hw[OWL_DMADESC_NEXT_LLI] = 0; /* One link list by default */
	lli->hw[OWL_DMADESC_SADDR] = src;
	lli->hw[OWL_DMADESC_DADDR] = dst;
	lli->hw[OWL_DMADESC_SRC_STRIDE] = 0;
	lli->hw[OWL_DMADESC_DST_STRIDE] = 0;

	if (od->devid == S700_DMA) {
		/* Max frame length is 1MB */
		lli->hw[OWL_DMADESC_FLEN] = len;
		/*
		 * On S700, the word at offset 0x1C is shared between the
		 * frame count and ctrlb: the first 12 bits hold the frame
		 * count and the remaining 20 bits hold ctrlb.
		 */
		lli->hw[OWL_DMADESC_CTRLB] = FCNT_VAL | ctrlb;
	} else {
		/*
		 * On S900, the word at offset 0xC is shared between the
		 * frame length (max frame length is 1MB) and the frame
		 * count: the first 20 bits hold the frame length and the
		 * remaining 12 bits hold the frame count.
		 */
		lli->hw[OWL_DMADESC_FLEN] = len | FCNT_VAL << 20;
		lli->hw[OWL_DMADESC_CTRLB] = ctrlb;
	}
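	/*
	 * For example, on S900 a 4 KiB frame (len = 0x1000) is encoded as
	 * FLEN = 0x00101000: frame length 0x1000 in bits 0..19 and the
	 * fixed frame count of 1 in bits 20..31.
	 */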

	return 0;
}

static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od,
					       struct owl_dma_vchan *vchan)
{
	struct owl_dma_pchan *pchan = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < od->nr_pchans; i++) {
		pchan = &od->pchans[i];

		spin_lock_irqsave(&od->lock, flags);
		if (!pchan->vchan) {
			pchan->vchan = vchan;
			spin_unlock_irqrestore(&od->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&od->lock, flags);
	}

	return pchan;
}

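/*
 * A set bit in OWL_DMA_IDLE_STAT indicates that the corresponding physical
 * channel is idle, so "busy" is the inverse of that bit.
 */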
static int owl_dma_pchan_busy(struct owl_dma *od, struct owl_dma_pchan *pchan)
{
	unsigned int val;

	val = dma_readl(od, OWL_DMA_IDLE_STAT);

	return !(val & (1 << pchan->id));
}

static void owl_dma_terminate_pchan(struct owl_dma *od,
				    struct owl_dma_pchan *pchan)
{
	unsigned long flags;
	u32 irq_pd;

	pchan_writel(pchan, OWL_DMAX_START, 0);
	pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);

	spin_lock_irqsave(&od->lock, flags);
	dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), false);

	irq_pd = dma_readl(od, OWL_DMA_IRQ_PD0);
	if (irq_pd & (1 << pchan->id)) {
		dev_warn(od->dma.dev,
			 "terminating pchan %d that still has pending irq\n",
			 pchan->id);
		dma_writel(od, OWL_DMA_IRQ_PD0, (1 << pchan->id));
	}

	pchan->vchan = NULL;

	spin_unlock_irqrestore(&od->lock, flags);
}

static void owl_dma_pause_pchan(struct owl_dma_pchan *pchan)
{
	pchan_writel(pchan, OWL_DMAX_PAUSE, 1);
}

static void owl_dma_resume_pchan(struct owl_dma_pchan *pchan)
{
	pchan_writel(pchan, OWL_DMAX_PAUSE, 0);
}

static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
{
	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
	struct virt_dma_desc *vd = vchan_next_desc(&vchan->vc);
	struct owl_dma_pchan *pchan = vchan->pchan;
	struct owl_dma_txd *txd = to_owl_txd(&vd->tx);
	struct owl_dma_lli *lli;
	unsigned long flags;
	u32 int_ctl;

	list_del(&vd->node);

	vchan->txd = txd;

	/* Wait for channel inactive */
	while (owl_dma_pchan_busy(od, pchan))
		cpu_relax();

	lli = list_first_entry(&txd->lli_list,
			       struct owl_dma_lli, node);

	if (txd->cyclic)
		int_ctl = OWL_DMA_INTCTL_BLOCK;
	else
		int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;

	pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME);
	pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL,
		     OWL_DMA_LLC_SAV_LOAD_NEXT | OWL_DMA_LLC_DAV_LOAD_NEXT);
	pchan_writel(pchan, OWL_DMAX_NEXT_DESCRIPTOR, lli->phys);
	pchan_writel(pchan, OWL_DMAX_INT_CTL, int_ctl);

	/* Clear IRQ status for this pchan */
	pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);

	spin_lock_irqsave(&od->lock, flags);

	dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), true);

	spin_unlock_irqrestore(&od->lock, flags);

	dev_dbg(chan2dev(&vchan->vc.chan), "starting pchan %d\n", pchan->id);

	/* Start DMA transfer for this pchan */
	pchan_writel(pchan, OWL_DMAX_START, 0x1);

	return 0;
}

static void owl_dma_phy_free(struct owl_dma *od, struct owl_dma_vchan *vchan)
{
	/* Ensure that the physical channel is stopped */
	owl_dma_terminate_pchan(od, vchan->pchan);

	vchan->pchan = NULL;
}

static irqreturn_t owl_dma_interrupt(int irq, void *dev_id)
{
	struct owl_dma *od = dev_id;
	struct owl_dma_vchan *vchan;
	struct owl_dma_pchan *pchan;
	unsigned long pending;
	int i;
	unsigned int global_irq_pending, chan_irq_pending;

	spin_lock(&od->lock);

	pending = dma_readl(od, OWL_DMA_IRQ_PD0);

	/* Clear IRQ status for each pchan */
	for_each_set_bit(i, &pending, od->nr_pchans) {
		pchan = &od->pchans[i];
		pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false);
	}

	/* Clear pending IRQ */
	dma_writel(od, OWL_DMA_IRQ_PD0, pending);

	/* Check missed pending IRQ */
	for (i = 0; i < od->nr_pchans; i++) {
		pchan = &od->pchans[i];
		chan_irq_pending = pchan_readl(pchan, OWL_DMAX_INT_CTL) &
				   pchan_readl(pchan, OWL_DMAX_INT_STATUS);

		/* Dummy read to ensure OWL_DMA_IRQ_PD0 value is updated */
		dma_readl(od, OWL_DMA_IRQ_PD0);

		global_irq_pending = dma_readl(od, OWL_DMA_IRQ_PD0);

		if (chan_irq_pending && !(global_irq_pending & BIT(i))) {
			dev_dbg(od->dma.dev,
				"global and channel IRQ pending match err\n");

			/* Clear IRQ status for this pchan */
			pchan_update(pchan, OWL_DMAX_INT_STATUS,
				     0xff, false);

			/* Update global IRQ pending */
			pending |= BIT(i);
		}
	}

	spin_unlock(&od->lock);

	for_each_set_bit(i, &pending, od->nr_pchans) {
		struct owl_dma_txd *txd;

		pchan = &od->pchans[i];

		vchan = pchan->vchan;
		if (!vchan) {
			dev_warn(od->dma.dev, "no vchan attached on pchan %d\n",
				 pchan->id);
			continue;
		}

		spin_lock(&vchan->vc.lock);

		txd = vchan->txd;
		if (txd) {
			vchan->txd = NULL;

			vchan_cookie_complete(&txd->vd);

			/*
			 * Start the next descriptor (if any),
			 * otherwise free this channel.
			 */
			if (vchan_next_desc(&vchan->vc))
				owl_dma_start_next_txd(vchan);
			else
				owl_dma_phy_free(od, vchan);
		}

		spin_unlock(&vchan->vc.lock);
	}

	return IRQ_HANDLED;
}

static void owl_dma_free_txd(struct owl_dma *od, struct owl_dma_txd *txd)
{
	struct owl_dma_lli *lli, *_lli;

	if (unlikely(!txd))
		return;

	list_for_each_entry_safe(lli, _lli, &txd->lli_list, node)
		owl_dma_free_lli(od, lli);

	kfree(txd);
}

static void owl_dma_desc_free(struct virt_dma_desc *vd)
{
	struct owl_dma *od = to_owl_dma(vd->tx.chan->device);
	struct owl_dma_txd *txd = to_owl_txd(&vd->tx);

	owl_dma_free_txd(od, txd);
}

static int owl_dma_terminate_all(struct dma_chan *chan)
{
	struct owl_dma *od = to_owl_dma(chan->device);
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	if (vchan->pchan)
		owl_dma_phy_free(od, vchan);

	if (vchan->txd) {
		owl_dma_desc_free(&vchan->txd->vd);
		vchan->txd = NULL;
	}

	vchan_get_all_descriptors(&vchan->vc, &head);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	vchan_dma_desc_free_list(&vchan->vc, &head);

	return 0;
}

static int owl_dma_config(struct dma_chan *chan,
			  struct dma_slave_config *config)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&vchan->cfg, config, sizeof(struct dma_slave_config));

	return 0;
}
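
/*
 * For reference, a minimal sketch of how a client typically drives one of
 * these channels through the generic dmaengine API (the device, buffer and
 * "tx" request-line names are illustrative, not part of this driver):
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "tx");
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * owl_dma_config() above stores the slave config, and the prep and issue
 * calls end up in the driver's prep callbacks and owl_dma_issue_pending()
 * below.
 */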

static int owl_dma_pause(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	owl_dma_pause_pchan(vchan->pchan);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}

static int owl_dma_resume(struct dma_chan *chan)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	unsigned long flags;

	if (!vchan->pchan && !vchan->txd)
		return 0;

	dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	owl_dma_resume_pchan(vchan->pchan);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}

static u32 owl_dma_getbytes_chan(struct owl_dma_vchan *vchan)
{
	struct owl_dma_pchan *pchan;
	struct owl_dma_txd *txd;
	struct owl_dma_lli *lli;
	unsigned int next_lli_phy;
	size_t bytes;

	pchan = vchan->pchan;
	txd = vchan->txd;

	if (!pchan || !txd)
		return 0;

	/* Get the remaining byte count of the current node in the link list */
	bytes = pchan_readl(pchan, OWL_DMAX_REMAIN_CNT);

	/* Loop through the link-list nodes to get the total remaining bytes */
	if (pchan_readl(pchan, OWL_DMAX_MODE) & OWL_DMA_MODE_LME) {
		next_lli_phy = pchan_readl(pchan, OWL_DMAX_NEXT_DESCRIPTOR);
		list_for_each_entry(lli, &txd->lli_list, node) {
			/* Start from the next active node */
			if (lli->phys == next_lli_phy) {
				list_for_each_entry(lli, &txd->lli_list, node)
					bytes += llc_hw_flen(lli);
				break;
			}
		}
	}

	return bytes;
}

static enum dma_status owl_dma_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *state)
{
	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
	struct owl_dma_lli *lli;
	struct virt_dma_desc *vd;
	struct owl_dma_txd *txd;
	enum dma_status ret;
	unsigned long flags;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (ret == DMA_COMPLETE || !state)
		return ret;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	vd = vchan_find_desc(&vchan->vc, cookie);
	if (vd) {
		txd = to_owl_txd(&vd->tx);
		list_for_each_entry(lli, &txd->lli_list, node)
			bytes += llc_hw_flen(lli);
	} else {
		bytes = owl_dma_getbytes_chan(vchan);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	dma_set_residue(state, bytes);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
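/* Try to grab a free physical channel for @vchan and start its next txd. */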
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) static void owl_dma_phy_alloc_and_start(struct owl_dma_vchan *vchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) struct owl_dma_pchan *pchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) pchan = owl_dma_get_pchan(od, vchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (!pchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) dev_dbg(od->dma.dev, "allocated pchan %d\n", pchan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) vchan->pchan = pchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) owl_dma_start_next_txd(vchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
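/*
 * Move all submitted descriptors to the issued list and, if the virtual
 * channel does not own a physical channel yet, try to start one right away.
 */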
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) static void owl_dma_issue_pending(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) struct owl_dma_vchan *vchan = to_owl_vchan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) spin_lock_irqsave(&vchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (vchan_issue_pending(&vchan->vc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (!vchan->pchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) owl_dma_phy_alloc_and_start(vchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) spin_unlock_irqrestore(&vchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
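/*
 * Prepare a memory-to-memory transfer. The copy is split into hardware
 * frames of at most OWL_DMA_FRAME_MAX_LENGTH bytes, each described by one
 * lli chained onto the descriptor's link list.
 */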
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) static struct dma_async_tx_descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) *owl_dma_prep_memcpy(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) dma_addr_t dst, dma_addr_t src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) struct owl_dma *od = to_owl_dma(chan->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) struct owl_dma_vchan *vchan = to_owl_vchan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) struct owl_dma_txd *txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) struct owl_dma_lli *lli, *prev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) size_t offset, bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (!len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (!txd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) INIT_LIST_HEAD(&txd->lli_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) /* Split the transfer into frames and process it frame by frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) for (offset = 0; offset < len; offset += bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) lli = owl_dma_alloc_lli(od);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) if (!lli) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) dev_warn(chan2dev(chan), "failed to allocate lli\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) goto err_txd_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) bytes, DMA_MEM_TO_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) &vchan->cfg, txd->cyclic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) dev_warn(chan2dev(chan), "failed to configure lli\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) goto err_txd_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) prev = owl_dma_add_lli(txd, prev, lli, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) err_txd_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) owl_dma_free_txd(od, txd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
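/*
 * Prepare a slave (device) transfer from a scatterlist: one lli per sg
 * entry, with the device-side address taken from the channel's
 * dma_slave_config and the memory-side address from the sg entry.
 */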
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) static struct dma_async_tx_descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) *owl_dma_prep_slave_sg(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) struct scatterlist *sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) unsigned int sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) enum dma_transfer_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) unsigned long flags, void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) struct owl_dma *od = to_owl_dma(chan->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) struct owl_dma_vchan *vchan = to_owl_vchan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) struct dma_slave_config *sconfig = &vchan->cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) struct owl_dma_txd *txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) struct owl_dma_lli *lli, *prev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) dma_addr_t addr, src = 0, dst = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) size_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (!txd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) INIT_LIST_HEAD(&txd->lli_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) for_each_sg(sgl, sg, sg_len, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) addr = sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) len = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) if (len > OWL_DMA_FRAME_MAX_LENGTH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) dev_err(od->dma.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) "frame length exceeds max supported length\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) goto err_txd_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) lli = owl_dma_alloc_lli(od);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (!lli) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) dev_err(chan2dev(chan), "failed to allocate lli\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) goto err_txd_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (dir == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) src = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) dst = sconfig->dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) src = sconfig->src_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) dst = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) ret = owl_dma_cfg_lli(vchan, lli, src, dst, len, dir, sconfig,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) txd->cyclic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) dev_warn(chan2dev(chan), "failed to configure lli\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) goto err_txd_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) prev = owl_dma_add_lli(txd, prev, lli, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) err_txd_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) owl_dma_free_txd(od, txd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
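/*
 * Prepare a cyclic transfer: one lli per period, with the last node linked
 * back to the first so the hardware keeps looping over the buffer until the
 * channel is terminated.
 */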
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) static struct dma_async_tx_descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) *owl_prep_dma_cyclic(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) dma_addr_t buf_addr, size_t buf_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) size_t period_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) enum dma_transfer_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) struct owl_dma *od = to_owl_dma(chan->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) struct owl_dma_vchan *vchan = to_owl_vchan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) struct dma_slave_config *sconfig = &vchan->cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) struct owl_dma_txd *txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) struct owl_dma_lli *lli, *prev = NULL, *first = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) dma_addr_t src = 0, dst = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) unsigned int periods = buf_len / period_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (!txd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) INIT_LIST_HEAD(&txd->lli_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) txd->cyclic = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) for (i = 0; i < periods; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) lli = owl_dma_alloc_lli(od);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (!lli) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) dev_warn(chan2dev(chan), "failed to allocate lli\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) goto err_txd_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (dir == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) src = buf_addr + (period_len * i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) dst = sconfig->dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) } else if (dir == DMA_DEV_TO_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) src = sconfig->src_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) dst = buf_addr + (period_len * i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) ret = owl_dma_cfg_lli(vchan, lli, src, dst, period_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) dir, sconfig, txd->cyclic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) dev_warn(chan2dev(chan), "failed to configure lli\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) goto err_txd_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (!first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) first = lli;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) prev = owl_dma_add_lli(txd, prev, lli, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) /* close the cyclic list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) owl_dma_add_lli(txd, prev, first, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) err_txd_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) owl_dma_free_txd(od, txd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) static void owl_dma_free_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) struct owl_dma_vchan *vchan = to_owl_vchan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) /* Ensure all queued descriptors are freed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) vchan_free_chan_resources(&vchan->vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) static inline void owl_dma_free(struct owl_dma *od)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) struct owl_dma_vchan *vchan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) struct owl_dma_vchan *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) list_for_each_entry_safe(vchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) next, &od->dma.channels, vc.chan.device_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) list_del(&vchan->vc.chan.device_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) tasklet_kill(&vchan->vc.task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
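/*
 * Translate a DT DMA specifier into a channel. The first cell of the
 * specifier holds the DRQ (request line) number, which is stored in the
 * virtual channel and used when configuring the transfer mode. A client
 * node would typically reference a channel roughly like this (illustration
 * only; the DRQ number is SoC and peripheral specific):
 *
 *	dmas = <&dma 22>;
 *	dma-names = "tx";
 */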
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) struct of_dma *ofdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) struct owl_dma *od = ofdma->of_dma_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) struct owl_dma_vchan *vchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) u8 drq = dma_spec->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (drq > od->nr_vchans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) chan = dma_get_any_slave_channel(&od->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (!chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) vchan = to_owl_vchan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) vchan->drq = drq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) return chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) static const struct of_device_id owl_dma_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) { .compatible = "actions,s900-dma", .data = (void *)S900_DMA,},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) { .compatible = "actions,s700-dma", .data = (void *)S700_DMA,},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) { /* sentinel */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) MODULE_DEVICE_TABLE(of, owl_dma_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
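/*
 * Probe: map the controller registers, read the channel and request counts
 * from the device tree, set up the physical and virtual channels, create the
 * lli pool and register the controller with both the dmaengine core and the
 * DT DMA framework.
 */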
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) static int owl_dma_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) struct device_node *np = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) struct owl_dma *od;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) int ret, i, nr_channels, nr_requests;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (!od)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) od->base = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) if (IS_ERR(od->base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) return PTR_ERR(od->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) ret = of_property_read_u32(np, "dma-channels", &nr_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) dev_err(&pdev->dev, "can't get dma-channels\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) ret = of_property_read_u32(np, "dma-requests", &nr_requests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) dev_err(&pdev->dev, "can't get dma-requests\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) nr_channels, nr_requests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) od->devid = (enum owl_dma_id)of_device_get_match_data(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) od->nr_pchans = nr_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) od->nr_vchans = nr_requests;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) platform_set_drvdata(pdev, od);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) spin_lock_init(&od->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) dma_cap_set(DMA_SLAVE, od->dma.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) dma_cap_set(DMA_CYCLIC, od->dma.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) od->dma.dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) od->dma.device_tx_status = owl_dma_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) od->dma.device_issue_pending = owl_dma_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) od->dma.device_config = owl_dma_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) od->dma.device_pause = owl_dma_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) od->dma.device_resume = owl_dma_resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) od->dma.device_terminate_all = owl_dma_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) od->dma.directions = BIT(DMA_MEM_TO_MEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) od->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) INIT_LIST_HEAD(&od->dma.channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) od->clk = devm_clk_get(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (IS_ERR(od->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) dev_err(&pdev->dev, "unable to get clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) return PTR_ERR(od->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * Even though the DMA controller is capable of generating 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * IRQs for the DMA priority feature, we only use 1 IRQ for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) * simplicity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) od->irq = platform_get_irq(pdev, 0);
if (od->irq < 0)
	return od->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) ret = devm_request_irq(&pdev->dev, od->irq, owl_dma_interrupt, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) dev_name(&pdev->dev), od);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) dev_err(&pdev->dev, "unable to request IRQ\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) /* Init physical channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) od->pchans = devm_kcalloc(&pdev->dev, od->nr_pchans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) sizeof(struct owl_dma_pchan), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (!od->pchans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) for (i = 0; i < od->nr_pchans; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) struct owl_dma_pchan *pchan = &od->pchans[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) pchan->id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) pchan->base = od->base + OWL_DMA_CHAN_BASE(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) /* Init virtual channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) od->vchans = devm_kcalloc(&pdev->dev, od->nr_vchans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) sizeof(struct owl_dma_vchan), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (!od->vchans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) for (i = 0; i < od->nr_vchans; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) struct owl_dma_vchan *vchan = &od->vchans[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) vchan->vc.desc_free = owl_dma_desc_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) vchan_init(&vchan->vc, &od->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) /* Create a pool of consistent memory blocks for hardware descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) od->lli_pool = dma_pool_create(dev_name(od->dma.dev), od->dma.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) sizeof(struct owl_dma_lli),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) __alignof__(struct owl_dma_lli),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (!od->lli_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) dev_err(&pdev->dev, "unable to allocate DMA descriptor pool\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) clk_prepare_enable(od->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) ret = dma_async_device_register(&od->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) dev_err(&pdev->dev, "failed to register DMA engine device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) goto err_pool_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) /* Device-tree DMA controller registration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) ret = of_dma_controller_register(pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) owl_dma_of_xlate, od);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) dev_err(&pdev->dev, "of_dma_controller_register failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) goto err_dma_unregister;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) err_dma_unregister:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) dma_async_device_unregister(&od->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) err_pool_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) clk_disable_unprepare(od->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) dma_pool_destroy(od->lli_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
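/*
 * Remove: unregister from the DT DMA framework and the dmaengine core, mask
 * and free the controller interrupt, tear down the virtual channels and
 * release the clock and the lli pool.
 */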
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) static int owl_dma_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) struct owl_dma *od = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) of_dma_controller_free(pdev->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) dma_async_device_unregister(&od->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) /* Mask all interrupts for this execution environment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) dma_writel(od, OWL_DMA_IRQ_EN0, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) /* Make sure we won't have any further interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) devm_free_irq(od->dma.dev, od->irq, od);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) owl_dma_free(od);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) clk_disable_unprepare(od->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) dma_pool_destroy(od->lli_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) static struct platform_driver owl_dma_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) .probe = owl_dma_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) .remove = owl_dma_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) .name = "dma-owl",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) .of_match_table = of_match_ptr(owl_dma_match),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) static int owl_dma_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) return platform_driver_register(&owl_dma_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) subsys_initcall(owl_dma_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) static void __exit owl_dma_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) platform_driver_unregister(&owl_dma_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) module_exit(owl_dma_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) MODULE_AUTHOR("David Liu <liuwei@actions-semi.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) MODULE_DESCRIPTION("Actions Semi Owl SoCs DMA driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) MODULE_LICENSE("GPL");