// SPDX-License-Identifier: GPL-2.0
/*
 * mtu3_qmu.c - Queue Management Unit driver for device controller
 *
 * Copyright (C) 2016 MediaTek Inc.
 *
 * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
 */

/*
 * The Queue Management Unit (QMU) is designed to offload from software
 * the effort of servicing DMA interrupts.
 * Software prepares General Purpose Descriptors (GPD) and Buffer
 * Descriptors (BD) to link data buffers, then triggers the QMU to send
 * data to / receive data from the host.
 * Currently only GPD is supported.
 *
 * For more detailed information, please refer to the QMU Programming Guide.
 */

#include <linux/dmapool.h>
#include <linux/iopoll.h>

#include "mtu3.h"
#include "mtu3_trace.h"

#define QMU_CHECKSUM_LEN	16

#define GPD_FLAGS_HWO	BIT(0)
#define GPD_FLAGS_BDP	BIT(1)
#define GPD_FLAGS_BPS	BIT(2)
#define GPD_FLAGS_ZLP	BIT(6)
#define GPD_FLAGS_IOC	BIT(7)
#define GET_GPD_HWO(gpd)	(le32_to_cpu((gpd)->dw0_info) & GPD_FLAGS_HWO)

#define GPD_RX_BUF_LEN_OG(x)	(((x) & 0xffff) << 16)
#define GPD_RX_BUF_LEN_EL(x)	(((x) & 0xfffff) << 12)
#define GPD_RX_BUF_LEN(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_RX_BUF_LEN_EL(x_) : GPD_RX_BUF_LEN_OG(x_); \
})

#define GPD_DATA_LEN_OG(x)	((x) & 0xffff)
#define GPD_DATA_LEN_EL(x)	((x) & 0xfffff)
#define GPD_DATA_LEN(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_DATA_LEN_EL(x_) : GPD_DATA_LEN_OG(x_); \
})

#define GPD_EXT_FLAG_ZLP	BIT(29)
#define GPD_EXT_NGP_OG(x)	(((x) & 0xf) << 20)
#define GPD_EXT_BUF_OG(x)	(((x) & 0xf) << 16)
#define GPD_EXT_NGP_EL(x)	(((x) & 0xf) << 28)
#define GPD_EXT_BUF_EL(x)	(((x) & 0xf) << 24)
#define GPD_EXT_NGP(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_EXT_NGP_EL(x_) : GPD_EXT_NGP_OG(x_); \
})

#define GPD_EXT_BUF(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_EXT_BUF_EL(x_) : GPD_EXT_BUF_OG(x_); \
})

#define HILO_GEN64(hi, lo) (((u64)(hi) << 32) + (lo))
#define HILO_DMA(hi, lo)	\
	((dma_addr_t)HILO_GEN64((le32_to_cpu(hi)), (le32_to_cpu(lo))))

static dma_addr_t read_txq_cur_addr(void __iomem *mbase, u8 epnum)
{
	u32 txcpr;
	u32 txhiar;

	txcpr = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
	txhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));

	return HILO_DMA(QMU_CUR_GPD_ADDR_HI(txhiar), txcpr);
}

static dma_addr_t read_rxq_cur_addr(void __iomem *mbase, u8 epnum)
{
	u32 rxcpr;
	u32 rxhiar;

	rxcpr = mtu3_readl(mbase, USB_QMU_RQCPR(epnum));
	rxhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));

	return HILO_DMA(QMU_CUR_GPD_ADDR_HI(rxhiar), rxcpr);
}

static void write_txq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
	u32 tqhiar;

	mtu3_writel(mbase, USB_QMU_TQSAR(epnum),
		    cpu_to_le32(lower_32_bits(dma)));
	tqhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));
	tqhiar &= ~QMU_START_ADDR_HI_MSK;
	tqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
	mtu3_writel(mbase, USB_QMU_TQHIAR(epnum), tqhiar);
}

static void write_rxq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
	u32 rqhiar;

	mtu3_writel(mbase, USB_QMU_RQSAR(epnum),
		    cpu_to_le32(lower_32_bits(dma)));
	rqhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));
	rqhiar &= ~QMU_START_ADDR_HI_MSK;
	rqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
	mtu3_writel(mbase, USB_QMU_RQHIAR(epnum), rqhiar);
}

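/*
 * Translate a GPD DMA address (e.g. read from the QMU current-pointer
 * register) into its virtual address within the ring; returns NULL if
 * the address lies outside the ring.
 */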
static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
		dma_addr_t dma_addr)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head);

	if (offset >= MAX_GPD_NUM)
		return NULL;

	return gpd_head + offset;
}

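/*
 * Translate a GPD's virtual address within the ring into its DMA address;
 * returns 0 if the GPD does not belong to the ring.
 */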
static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
		struct qmu_gpd *gpd)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset;

	offset = gpd - gpd_head;
	if (offset >= MAX_GPD_NUM)
		return 0;

	return dma_base + (offset * sizeof(*gpd));
}

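/* initialize the ring: enqueue and dequeue both point at the first GPD */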
static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
{
	ring->start = gpd;
	ring->enqueue = gpd;
	ring->dequeue = gpd;
	ring->end = gpd + MAX_GPD_NUM - 1;
}

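/* clear HWO of the first GPD and restore the ring to its initial state */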
static void reset_gpd_list(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->start;

	if (gpd) {
		gpd->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
		gpd_ring_init(ring, gpd);
	}
}

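/* allocate the endpoint's GPD ring from the DMA pool and initialize it */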
int mtu3_gpd_ring_alloc(struct mtu3_ep *mep)
{
	struct qmu_gpd *gpd;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	/* software owns all GPDs by default */
	gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma);
	if (gpd == NULL)
		return -ENOMEM;

	gpd_ring_init(ring, gpd);

	return 0;
}

void mtu3_gpd_ring_free(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	dma_pool_free(mep->mtu->qmu_gpd_pool,
		      ring->start, ring->dma);
	memset(ring, 0, sizeof(*ring));
}

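/*
 * Resume a stopped queue: write QMU_Q_RESUME, and write it once more if
 * the queue has not become active yet.
 */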
void mtu3_qmu_resume(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 offset;

	offset = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

	mtu3_writel(mbase, offset, QMU_Q_RESUME);
	if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE))
		mtu3_writel(mbase, offset, QMU_Q_RESUME);
}

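/* the two helpers below advance enqueue/dequeue, wrapping at the ring end */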
static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->enqueue < ring->end)
		ring->enqueue++;
	else
		ring->enqueue = ring->start;

	return ring->enqueue;
}

static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->dequeue < ring->end)
		ring->dequeue++;
	else
		ring->dequeue = ring->start;

	return ring->dequeue;
}

/* check if a ring is empty */
static int gpd_ring_empty(struct mtu3_gpd_ring *ring)
{
	struct qmu_gpd *enq = ring->enqueue;
	struct qmu_gpd *next;

	if (ring->enqueue < ring->end)
		next = enq + 1;
	else
		next = ring->start;

	/* one gpd is reserved to simplify gpd preparation */
	return next == ring->dequeue;
}

int mtu3_prepare_transfer(struct mtu3_ep *mep)
{
	return gpd_ring_empty(&mep->gpd_ring);
}

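/*
 * Fill the current enqueue GPD for a TX (IN) request: program the buffer
 * address and data length, link it to the next GPD, then hand the GPD over
 * to HW by setting IOC | HWO last.
 */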
static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;
	struct mtu3 *mtu = mep->mtu;
	dma_addr_t enq_dma;
	u32 ext_addr;

	gpd->dw0_info = 0;	/* SW owns it */
	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
	ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
	gpd->dw3_info = cpu_to_le32(GPD_DATA_LEN(mtu, req->length));

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	enq_dma = gpd_virt_to_dma(ring, enq);
	dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
		mep->epnum, gpd, enq, &enq_dma);

	enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
	ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
	gpd->dw0_info = cpu_to_le32(ext_addr);

	if (req->zero) {
		if (mtu->gen2cp)
			gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_ZLP);
		else
			gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP);
	}

	/* prevent reorder, make sure GPD's HWO is set last */
	mb();
	gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);

	mreq->gpd = gpd;
	trace_mtu3_prepare_gpd(mep, gpd);

	return 0;
}

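/*
 * Fill the current enqueue GPD for an RX (OUT) request: program the buffer
 * address and allowed buffer length, link it to the next GPD, then set
 * IOC | HWO last.
 */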
static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;
	struct mtu3 *mtu = mep->mtu;
	dma_addr_t enq_dma;
	u32 ext_addr;

	gpd->dw0_info = 0;	/* SW owns it */
	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
	ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
	gpd->dw0_info = cpu_to_le32(GPD_RX_BUF_LEN(mtu, req->length));

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	enq_dma = gpd_virt_to_dma(ring, enq);
	dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
		mep->epnum, gpd, enq, &enq_dma);

	enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
	ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
	gpd->dw3_info = cpu_to_le32(ext_addr);
	/* prevent reorder, make sure GPD's HWO is set last */
	mb();
	gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);

	mreq->gpd = gpd;
	trace_mtu3_prepare_gpd(mep, gpd);

	return 0;
}

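/* prepare a GPD for the request according to the endpoint's direction */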
void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	if (mep->is_in)
		mtu3_prepare_tx_gpd(mep, mreq);
	else
		mtu3_prepare_rx_gpd(mep, mreq);
}

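/*
 * Program the queue start address, enable DMA requests and the error
 * interrupts for this endpoint, then start the TX/RX queue if it is not
 * already active.
 */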
int mtu3_qmu_start(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	u8 epnum = mep->epnum;

	if (mep->is_in) {
		/* set QMU start address */
		write_txq_start_addr(mbase, epnum, ring->dma);
		mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_DMAREQEN);
		/* send zero length packet according to ZLP flag in GPD */
		mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum));
		mtu3_writel(mbase, U3D_TQERRIESR0,
			    QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START);

	} else {
		write_rxq_start_addr(mbase, epnum, ring->dma);
		mtu3_setbits(mbase, MU3D_EP_RXCR0(epnum), RX_DMAREQEN);
		/* don't expect ZLP */
		mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum));
		/* move to the next GPD when a ZLP is received */
		mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR0,
			    QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START);
	}

	return 0;
}

/* may be called in atomic context */
void mtu3_qmu_stop(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 value = 0;
	u32 qcsr;
	int ret;

	qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

	if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) {
		dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name);
		return;
	}
	mtu3_writel(mbase, qcsr, QMU_Q_STOP);

	ret = readl_poll_timeout_atomic(mbase + qcsr, value,
			!(value & QMU_Q_ACTIVE), 1, 1000);
	if (ret) {
		dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name);
		return;
	}

	dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name);
}

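/* stop the endpoint's queue and reset its GPD ring */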
void mtu3_qmu_flush(struct mtu3_ep *mep)
{
	dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__,
		((mep->is_in) ? "TX" : "RX"));

	/* stop QMU */
	mtu3_qmu_stop(mep);
	reset_gpd_list(mep);
}

/*
 * The QMU cannot transfer a zero-length packet directly (a hardware
 * limitation on old SoCs), so when a ZLP needs to be sent, we intentionally
 * trigger a length error interrupt and send the ZLP by BMU in the ISR.
 */
static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd_current = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;
	u32 txcsr = 0;
	int ret;

	mreq = next_request(mep);
	if (mreq && mreq->request.length != 0)
		return;

	cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	if (GPD_DATA_LEN(mtu, le32_to_cpu(gpd_current->dw3_info)) != 0) {
		dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
		return;
	}

	dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq);
	trace_mtu3_zlp_exp_gpd(mep, gpd_current);

	mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);

	ret = readl_poll_timeout_atomic(mbase + MU3D_EP_TXCR0(mep->epnum),
			txcsr, !(txcsr & TX_FIFOFULL), 1, 1000);
	if (ret) {
		dev_err(mtu->dev, "%s wait for fifo empty fail\n", __func__);
		return;
	}
	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);
	/* prevent reorder, make sure GPD's HWO is set last */
	mb();
	/* bypass the current GPD */
	gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);

	/* enable DMAREQEN, switch back to QMU mode */
	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
	mtu3_qmu_resume(mep);
}

/*
 * NOTE: the request list may already be empty in the following case:
 * queue_tx --> qmu_interrupt(clear interrupt pending, schedule tasklet) -->
 * queue_tx --> process_tasklet(meanwhile, the second request is transferred,
 * and the tasklet processes both of them) --> qmu_interrupt for the second one.
 * To avoid this case, call qmu_done_tx() directly from the ISR.
 */
static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *request = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	/*
	 * Translate the current GPD's DMA address, read from the QMU
	 * register, into its virtual address.
	 */
	cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
		mreq = next_request(mep);

		if (mreq == NULL || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct TX req is found\n");
			break;
		}

		request = &mreq->request;
		request->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
		trace_mtu3_complete_gpd(mep, gpd);
		mtu3_req_complete(mep, request, 0);

		gpd = advance_deq_gpd(ring);
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}

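/*
 * Complete finished RX requests: walk the GPDs from dequeue up to the
 * current GPD, giving back each one whose HWO bit HW has already cleared.
 */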
static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->out_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *req = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	cur_gpd_dma = read_rxq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
		mreq = next_request(mep);

		if (mreq == NULL || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct RX req is found\n");
			break;
		}
		req = &mreq->request;

		req->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
		trace_mtu3_complete_gpd(mep, gpd);
		mtu3_req_complete(mep, req, 0);

		gpd = advance_deq_gpd(ring);
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}

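/* dispatch TX/RX done interrupts to the per-endpoint completion handlers */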
static void qmu_done_isr(struct mtu3 *mtu, u32 done_status)
{
	int i;

	for (i = 1; i < mtu->num_eps; i++) {
		if (done_status & QMU_RX_DONE_INT(i))
			qmu_done_rx(mtu, i);
		if (done_status & QMU_TX_DONE_INT(i))
			qmu_done_tx(mtu, i);
	}
}

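/*
 * Handle QMU exception interrupts: report RX/TX checksum and length errors,
 * and use the TX length error to send a ZLP via qmu_tx_zlp_error_handler().
 */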
static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status)
{
	void __iomem *mbase = mtu->mac_base;
	u32 errval;
	int i;

	if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_RQERRIR0);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_CS_ERR(i))
				dev_err(mtu->dev, "Rx %d CS error!\n", i);

			if (errval & QMU_RX_LEN_ERR(i))
				dev_err(mtu->dev, "RX %d Length error\n", i);
		}
		mtu3_writel(mbase, U3D_RQERRIR0, errval);
	}

	if (qmu_status & RXQ_ZLPERR_INT) {
		errval = mtu3_readl(mbase, U3D_RQERRIR1);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_ZLP_ERR(i))
				dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i);
		}
		mtu3_writel(mbase, U3D_RQERRIR1, errval);
	}

	if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_TQERRIR0);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_TX_CS_ERR(i))
				dev_err(mtu->dev, "Tx %d checksum error!\n", i);

			if (errval & QMU_TX_LEN_ERR(i))
				qmu_tx_zlp_error_handler(mtu, i);
		}
		mtu3_writel(mbase, U3D_TQERRIR0, errval);
	}
}

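/*
 * Top-level QMU interrupt handler: latch and acknowledge the done status,
 * then run the done and exception handlers for the pending bits.
 */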
irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu)
{
	void __iomem *mbase = mtu->mac_base;
	u32 qmu_status;
	u32 qmu_done_status;

	/* U3D_QISAR1 is read update */
	qmu_status = mtu3_readl(mbase, U3D_QISAR1);
	qmu_status &= mtu3_readl(mbase, U3D_QIER1);

	qmu_done_status = mtu3_readl(mbase, U3D_QISAR0);
	qmu_done_status &= mtu3_readl(mbase, U3D_QIER0);
	mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */
	dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n",
		(qmu_done_status & 0xFFFF), qmu_done_status >> 16,
		qmu_status);
	trace_mtu3_qmu_isr(qmu_done_status, qmu_status);

	if (qmu_done_status)
		qmu_done_isr(mtu, qmu_done_status);

	if (qmu_status)
		qmu_exception_isr(mtu, qmu_status);

	return IRQ_HANDLED;
}

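/* create the DMA pool used to allocate the per-endpoint GPD rings */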
int mtu3_qmu_init(struct mtu3 *mtu)
{
	compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B");

	mtu->qmu_gpd_pool = dma_pool_create("QMU_GPD", mtu->dev,
			QMU_GPD_RING_SIZE, QMU_GPD_SIZE, 0);

	if (!mtu->qmu_gpd_pool)
		return -ENOMEM;

	return 0;
}

void mtu3_qmu_exit(struct mtu3 *mtu)
{
	dma_pool_destroy(mtu->qmu_gpd_pool);
}