// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/gpio.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/seq_file.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <uapi/linux/rk-pcie-dma.h>

#include "rockchip-pcie-dma.h"
/* DMA transfer */
/*
 * Write buffer format
 * 0          4              8         0xc      0x10    SZ_1M
 * ------------------------------------------------------
 * |0x12345678|local idx(0-7)|data size|reserved|data   |
 * ------------------------------------------------------
 *
 * Bytes 3-0:  magic word the receiver checks to detect a valid data package
 * Bytes 7-4:  index into the data rcv ack buffer
 * Bytes 11-8: actual data size
 *
 * Data rcv ack buffer format
 * 0            4B
 * --------------
 * |0xdeadbeef  |
 * --------------
 *
 * Data free ack buffer format
 * 0            4B
 * --------------
 * |0xcafebabe  |
 * --------------
 *
 *      RC                EP
 * -  ---------        ---------
 * |  |  1MB  |        |       |
 * |  |-------|        |       |
 * |  |       |        |       |
 * |  |       |        |       |
 * 8MB|wr buf |   ->   |rd buf |
 * |  |       |        |       |
 * |  |       |        |       |
 * |  |       |        |       |
 * -  ---------        ---------
 * |  |  1MB  |        |       |
 * |  |-------|        |       |
 * |  |       |        |       |
 * |  |       |        |       |
 * 8MB|rd buf |   <-   |wr buf |
 * |  |       |        |       |
 * |  |       |        |       |
 * |  |       |        |       |
 * -  ---------        ---------
 * |  |  4B   |        |       |
 * |  |-------|        |       |
 * |  |       |        |       |
 * 32B|       |        |       |
 * |  |scan   |   <-   |data   |
 * |  |       |        |rcv    |
 * |  |       |        |ack    |
 * |  |       |        |send   |
 * -  ---------        ---------
 * |  |  4B   |        |       |
 * |  |-------|        |       |
 * |  |       |        |       |
 * 32B|data   |   ->   |scan   |
 * |  |rcv    |        |       |
 * |  |ack    |        |       |
 * |  |send   |        |       |
 * |  |       |        |       |
 * -  ---------        ---------
 * |  |  4B   |        |       |
 * |  |-------|        |       |
 * |  |       |        |       |
 * 32B|       |        |       |
 * |  |scan   |   <-   |data   |
 * |  |       |        |free   |
 * |  |       |        |ack    |
 * |  |       |        |send   |
 * -  ---------        ---------
 * |  |  4B   |        |       |
 * |  |-------|        |       |
 * |  |       |        |       |
 * 32B|data   |   ->   |scan   |
 * |  |free   |        |       |
 * |  |ack    |        |       |
 * |  |send   |        |       |
 * |  |       |        |       |
 * -  ---------        ---------
 */

#define NODE_SIZE			(sizeof(unsigned int))
#define PCIE_DMA_ACK_BLOCK_SIZE		(NODE_SIZE * 8)

#define PCIE_DMA_BUF_CNT		8

#define PCIE_DMA_DATA_CHECK		0x12345678
#define PCIE_DMA_DATA_ACK_CHECK		0xdeadbeef
#define PCIE_DMA_DATA_FREE_ACK_CHECK	0xcafebabe

#define PCIE_DMA_PARAM_SIZE		64
#define PCIE_DMA_CHN0			0x0

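/*
 * Transfer types that can be queued: a payload send, the two
 * acknowledge writes of the handshake described above, and a read of
 * the remote write buffer.
 */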
enum transfer_type {
	PCIE_DMA_DATA_SND,
	PCIE_DMA_DATA_RCV_ACK,
	PCIE_DMA_DATA_FREE_ACK,
	PCIE_DMA_READ_REMOTE,
};

/*
 * Set via debugfs: when non-zero, an XOR checksum is written into and
 * verified on every payload buffer.
 */
static int enable_check_sum;

struct pcie_misc_dev {
	struct miscdevice dev;
	struct dma_trx_obj *obj;
};

static void *rk_pcie_map_kernel(phys_addr_t start, size_t len);
static void rk_pcie_unmap_kernel(void *vaddr);

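/* Bus number 0 means this end is the root complex; otherwise it is the EP. */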
static inline bool is_rc(struct dma_trx_obj *obj)
{
	return obj->busno == 0;
}

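/* XOR-fold the buffer into one 32-bit word; a cheap integrity check for debugging. */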
static unsigned int rk_pcie_check_sum(unsigned int *src, int size)
{
	unsigned int result = 0;

	size /= sizeof(*src);

	while (size-- > 0)
		result ^= *src++;

	return result;
}

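/*
 * Completion handler, expected to be called by the PCIe controller
 * driver when a DMA transfer finishes: mark the channel free and, once
 * the table list has drained and enough loops have run, wake the
 * waiter in PCIE_DMA_WAIT_TRANSFER_COMPLETE.
 */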
static int rk_pcie_handle_dma_interrupt(struct dma_trx_obj *obj, u32 chn, enum dma_dir dir)
{
	struct dma_table *cur;

	cur = obj->cur;
	if (!cur) {
		pr_err("no pcie dma table\n");
		return 0;
	}

	obj->dma_free = true;
	obj->irq_num++;

	if (cur->dir == DMA_TO_BUS) {
		if (list_empty(&obj->tbl_list)) {
			if (obj->dma_free &&
			    obj->loop_count >= obj->loop_count_threshold)
				complete(&obj->done);
		}
	}

	return 0;
}

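/*
 * Build the dma_table entry for one transfer: pick the table slot for
 * the given type, derive the local/bus/virtual addresses (applying the
 * RC/EP half-buffer offsets), stamp the handshake magics into the
 * outgoing buffer, then queue the table for the transfer worker.
 */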
static void rk_pcie_prepare_dma(struct dma_trx_obj *obj,
				unsigned int idx, unsigned int bus_idx,
				unsigned int local_idx, size_t buf_size,
				enum transfer_type type, int chn)
{
	struct device *dev = obj->dev;
	phys_addr_t local, bus;
	void *virt;
	unsigned long flags;
	struct dma_table *table = NULL;
	unsigned int checksum;

	switch (type) {
	case PCIE_DMA_DATA_SND:
		table = obj->table[PCIE_DMA_DATA_SND_TABLE_OFFSET + local_idx];
		table->type = PCIE_DMA_DATA_SND;
		table->dir = DMA_TO_BUS;
		local = obj->local_mem_start + local_idx * obj->buffer_size;
		bus = obj->remote_mem_start + bus_idx * obj->buffer_size;
		virt = obj->local_mem_base + local_idx * obj->buffer_size;

		if (obj->addr_reverse) {
			if (is_rc(obj)) {
				local += obj->rd_buf_size;
				virt += obj->rd_buf_size;
				bus += obj->wr_buf_size;
			}
		} else {
			if (!is_rc(obj)) {
				local += obj->rd_buf_size;
				virt += obj->rd_buf_size;
				bus += obj->wr_buf_size;
			}
		}

		obj->begin = ktime_get();
		dma_sync_single_for_device(dev, local, buf_size, DMA_TO_DEVICE);
		obj->end = ktime_get();

		obj->cache_time_total += ktime_to_ns(ktime_sub(obj->end, obj->begin));

		writel(PCIE_DMA_DATA_CHECK, virt + obj->set_data_check_pos);
		writel(local_idx, virt + obj->set_local_idx_pos);
		writel(buf_size, virt + obj->set_buf_size_pos);

		if (enable_check_sum) {
			checksum = rk_pcie_check_sum(virt, SZ_1M - 0x10);
			writel(checksum, virt + obj->set_chk_sum_pos);
		}

		buf_size = obj->buffer_size;
		break;
	case PCIE_DMA_DATA_RCV_ACK:
		table = obj->table[PCIE_DMA_DATA_RCV_ACK_TABLE_OFFSET + idx];
		table->type = PCIE_DMA_DATA_RCV_ACK;
		table->dir = DMA_TO_BUS;
		local = obj->local_mem_start + obj->ack_base + idx * NODE_SIZE;
		virt = obj->local_mem_base + obj->ack_base + idx * NODE_SIZE;
		bus = obj->remote_mem_start + obj->ack_base + idx * NODE_SIZE;

		if (is_rc(obj)) {
			local += PCIE_DMA_ACK_BLOCK_SIZE;
			bus += PCIE_DMA_ACK_BLOCK_SIZE;
			virt += PCIE_DMA_ACK_BLOCK_SIZE;
		}
		writel(PCIE_DMA_DATA_ACK_CHECK, virt);
		break;
	case PCIE_DMA_DATA_FREE_ACK:
		table = obj->table[PCIE_DMA_DATA_FREE_ACK_TABLE_OFFSET + idx];
		table->type = PCIE_DMA_DATA_FREE_ACK;
		table->dir = DMA_TO_BUS;
		local = obj->local_mem_start + obj->ack_base + idx * NODE_SIZE;
		bus = obj->remote_mem_start + obj->ack_base + idx * NODE_SIZE;
		virt = obj->local_mem_base + obj->ack_base + idx * NODE_SIZE;
		buf_size = 4;

		if (is_rc(obj)) {
			local += 3 * PCIE_DMA_ACK_BLOCK_SIZE;
			bus += 3 * PCIE_DMA_ACK_BLOCK_SIZE;
			virt += 3 * PCIE_DMA_ACK_BLOCK_SIZE;
		} else {
			local += 2 * PCIE_DMA_ACK_BLOCK_SIZE;
			bus += 2 * PCIE_DMA_ACK_BLOCK_SIZE;
			virt += 2 * PCIE_DMA_ACK_BLOCK_SIZE;
		}
		writel(PCIE_DMA_DATA_FREE_ACK_CHECK, virt);
		break;
	case PCIE_DMA_READ_REMOTE:
		table = obj->table[PCIE_DMA_DATA_READ_REMOTE_TABLE_OFFSET + local_idx];
		table->type = PCIE_DMA_READ_REMOTE;
		table->dir = DMA_FROM_BUS;
		local = obj->local_mem_start + local_idx * obj->buffer_size;
		bus = obj->remote_mem_start + bus_idx * obj->buffer_size;
		if (!is_rc(obj)) {
			local += obj->rd_buf_size;
			bus += obj->wr_buf_size;
		}
		buf_size = obj->buffer_size;
		break;
	default:
		dev_err(dev, "type = %d not supported\n", type);
		return;
	}

	table->buf_size = buf_size;
	table->bus = bus;
	table->local = local;
	table->chn = chn;

	if (!obj->config_dma_func) {
		WARN_ON(1);
		return;
	}
	obj->config_dma_func(table);

	spin_lock_irqsave(&obj->tbl_list_lock, flags);
	list_add_tail(&table->tbl_node, &obj->tbl_list);
	spin_unlock_irqrestore(&obj->tbl_list_lock, flags);
}

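/*
 * Transfer worker: drain the queued tables, starting the next one
 * each time the DMA channel is free.
 */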
static void rk_pcie_dma_trx_work(struct work_struct *work)
{
	unsigned long flags;
	struct dma_trx_obj *obj = container_of(work,
				struct dma_trx_obj, dma_trx_work);
	struct dma_table *table;

	while (!list_empty(&obj->tbl_list)) {
		table = list_first_entry(&obj->tbl_list, struct dma_table,
					 tbl_node);
		if (obj->dma_free) {
			obj->dma_free = false;
			spin_lock_irqsave(&obj->tbl_list_lock, flags);
			list_del_init(&table->tbl_node);
			spin_unlock_irqrestore(&obj->tbl_list_lock, flags);
			obj->cur = table;
			if (!obj->start_dma_func) {
				WARN_ON(1);
				return;
			}
			reinit_completion(&obj->done);
			obj->start_dma_func(obj, table);
		}
	}
}

static void rk_pcie_clear_ack(void *addr)
{
	writel(0x0, addr);
}

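/*
 * Periodic scan (re-armed every 100 us): the two ends signal each
 * other through ordinary memory writes, so this timer polls the
 * inbound buffers for the magic words, acks fresh payloads, harvests
 * rcv/free acks into the availability bitmaps, and wakes poll()
 * waiters.
 */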
static enum hrtimer_restart rk_pcie_scan_timer(struct hrtimer *timer)
{
	unsigned int sdv;
	unsigned int idx;
	unsigned int sav;
	unsigned int suv;
	void *sda_base;
	void *scan_data_addr;
	void *scan_ack_addr;
	void *scan_user_addr;
	int i;
	bool need_ack = false;
	struct dma_trx_obj *obj = container_of(timer,
					struct dma_trx_obj, scan_timer);
	unsigned int check_sum, check_sum_tmp;

	if (!obj->remote_mem_start) {
		if (is_rc(obj))
			obj->remote_mem_start = readl(obj->region_base + 0x4);
		else
			obj->remote_mem_start = readl(obj->region_base);
		goto continue_scan;
	}

	for (i = 0; i < PCIE_DMA_BUF_CNT; i++) {
		sda_base = obj->local_mem_base + obj->buffer_size * i;

		if (obj->addr_reverse) {
			if (is_rc(obj))
				scan_data_addr = sda_base;
			else
				scan_data_addr = sda_base + obj->rd_buf_size;
		} else {
			if (is_rc(obj))
				scan_data_addr = sda_base + obj->rd_buf_size;
			else
				scan_data_addr = sda_base;
		}
		sdv = readl(scan_data_addr + obj->set_data_check_pos);
		idx = readl(scan_data_addr + obj->set_local_idx_pos);

		if (sdv == PCIE_DMA_DATA_CHECK) {
			if (!need_ack)
				need_ack = true;
			if (enable_check_sum) {
				check_sum = readl(scan_data_addr + obj->set_chk_sum_pos);
				check_sum_tmp = rk_pcie_check_sum(scan_data_addr, SZ_1M - 0x10);
				if (check_sum != check_sum_tmp) {
					pr_err("checksum[%d] failed, 0x%x, should be 0x%x\n",
					       idx, check_sum_tmp, check_sum);
					print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
						       32, 4, scan_data_addr, SZ_1M, false);
				}
				writel(0x0, scan_data_addr + obj->set_chk_sum_pos);
			}
			writel(0x0, scan_data_addr + obj->set_data_check_pos);

			set_bit(i, &obj->local_read_available);
			rk_pcie_prepare_dma(obj, idx, 0, 0, 0x4,
					PCIE_DMA_DATA_RCV_ACK, PCIE_DMA_DEFAULT_CHN);
		}
	}

	if (need_ack || !list_empty(&obj->tbl_list))
		queue_work(obj->dma_trx_wq, &obj->dma_trx_work);

	scan_ack_addr = obj->local_mem_base + obj->ack_base;
	scan_user_addr = obj->local_mem_base + obj->ack_base;

	if (is_rc(obj)) {
		scan_user_addr += PCIE_DMA_ACK_BLOCK_SIZE * 2;
	} else {
		scan_ack_addr += PCIE_DMA_ACK_BLOCK_SIZE;
		scan_user_addr += PCIE_DMA_ACK_BLOCK_SIZE * 3;
	}

	for (i = 0; i < PCIE_DMA_BUF_CNT; i++) {
		void *addr = scan_ack_addr + i * NODE_SIZE;

		sav = readl(addr);
		if (sav == PCIE_DMA_DATA_ACK_CHECK) {
			rk_pcie_clear_ack(addr);
			set_bit(i, &obj->local_write_available);
		}

		addr = scan_user_addr + i * NODE_SIZE;
		suv = readl(addr);
		if (suv == PCIE_DMA_DATA_FREE_ACK_CHECK) {
			rk_pcie_clear_ack(addr);
			set_bit(i, &obj->remote_write_available);
		}
	}

	if ((obj->local_write_available && obj->remote_write_available) ||
	    obj->local_read_available) {
		wake_up(&obj->event_queue);
	}

continue_scan:
	hrtimer_add_expires(&obj->scan_timer, ktime_set(0, 100 * 1000));

	return HRTIMER_RESTART;
}

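/*
 * The first open resets the handshake state and availability bitmaps;
 * later opens only take another reference.
 */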
static int rk_pcie_misc_open(struct inode *inode, struct file *filp)
{
	struct miscdevice *miscdev = filp->private_data;
	struct pcie_misc_dev *pcie_misc_dev = container_of(miscdev,
						struct pcie_misc_dev, dev);

	filp->private_data = pcie_misc_dev->obj;

	mutex_lock(&pcie_misc_dev->obj->count_mutex);
	if (pcie_misc_dev->obj->ref_count++)
		goto already_opened;

	pcie_misc_dev->obj->loop_count = 0;
	pcie_misc_dev->obj->local_read_available = 0x0;
	pcie_misc_dev->obj->local_write_available = 0xff;
	pcie_misc_dev->obj->remote_write_available = 0xff;
	pcie_misc_dev->obj->dma_free = true;

	pr_info("Opened pcie misc device\n");

already_opened:
	mutex_unlock(&pcie_misc_dev->obj->count_mutex);
	return 0;
}

static int rk_pcie_misc_release(struct inode *inode, struct file *filp)
{
	struct dma_trx_obj *obj = filp->private_data;

	mutex_lock(&obj->count_mutex);

	if (--obj->ref_count)
		goto still_opened;
	hrtimer_cancel(&obj->scan_timer);

	pr_info("Closed pcie misc device\n");

still_opened:
	mutex_unlock(&obj->count_mutex);
	return 0;
}

static int rk_pcie_misc_mmap(struct file *filp,
			     struct vm_area_struct *vma)
{
	struct dma_trx_obj *obj = filp->private_data;
	size_t size = vma->vm_end - vma->vm_start;
	int err;

	err = remap_pfn_range(vma, vma->vm_start,
			      __phys_to_pfn(obj->local_mem_start),
			      size, vma->vm_page_prot);
	if (err)
		return -EAGAIN;

	return 0;
}

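/*
 * Each side owns one 4-byte slot in the shared address region (RC at
 * offset 0x0, EP at 0x4); DMA our slot across the link so the remote
 * end learns our buffer base address.
 */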
static void rk_pcie_send_addr_to_remote(struct dma_trx_obj *obj)
{
	struct dma_table *table;

	/* Temporarily borrow a send table to push the local buffer address to the remote end */
	table = obj->table[PCIE_DMA_DATA_SND_TABLE_OFFSET];
	table->type = PCIE_DMA_DATA_SND;
	table->dir = DMA_TO_BUS;
	table->buf_size = 0x4;
	if (is_rc(obj))
		table->local = obj->region_start;
	else
		table->local = obj->region_start + 0x4;
	table->bus = table->local;
	table->chn = PCIE_DMA_DEFAULT_CHN;
	obj->config_dma_func(table);
	obj->cur = table;
	obj->start_dma_func(obj, table);
}

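/*
 * Userspace control interface: buffer-index bookkeeping, cache
 * maintenance, buffer geometry setup, and queueing of the actual DMA
 * transfers.
 */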
static long rk_pcie_misc_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	struct dma_trx_obj *obj = filp->private_data;
	struct device *dev = obj->dev;
	union pcie_dma_ioctl_param msg;
	union pcie_dma_ioctl_param msg_to_user;
	phys_addr_t addr;
	void __user *uarg = (void __user *)arg;
	int ret;
	int i;
	phys_addr_t addr_send_to_remote;
	enum transfer_type type;

	if (copy_from_user(&msg, uarg, sizeof(msg)) != 0) {
		dev_err(dev, "failed to copy argument into kernel space\n");
		return -EFAULT;
	}

	switch (cmd) {
	case PCIE_DMA_START:
		test_and_clear_bit(msg.in.l_widx, &obj->local_write_available);
		test_and_clear_bit(msg.in.r_widx, &obj->remote_write_available);
		type = PCIE_DMA_DATA_SND;
		obj->loop_count++;
		break;
	case PCIE_DMA_GET_LOCAL_READ_BUFFER_INDEX:
		msg_to_user.lra = obj->local_read_available;
		addr = obj->local_mem_start;
		if (is_rc(obj))
			addr += obj->rd_buf_size;
		/* The cache is invalidated here by the kernel, or by the user */
		for (i = 0; i < PCIE_DMA_BUF_CNT; i++) {
			if (test_bit(i, &obj->local_read_available))
				dma_sync_single_for_cpu(dev,
						addr + i * obj->buffer_size,
						obj->buffer_size,
						DMA_FROM_DEVICE);
		}

		ret = copy_to_user(uarg, &msg_to_user, sizeof(msg));
		if (ret) {
			dev_err(dev, "failed to get read buffer index\n");
			return -EFAULT;
		}
		break;
	case PCIE_DMA_FREE_LOCAL_READ_BUFFER_INDEX:
		test_and_clear_bit(msg.in.idx, &obj->local_read_available);
		type = PCIE_DMA_DATA_FREE_ACK;
		break;
	case PCIE_DMA_GET_LOCAL_REMOTE_WRITE_BUFFER_INDEX:
		msg_to_user.out.lwa = obj->local_write_available;
		msg_to_user.out.rwa = obj->remote_write_available;
		ret = copy_to_user(uarg, &msg_to_user, sizeof(msg));
		if (ret) {
			dev_err(dev, "failed to get write buffer index\n");
			return -EFAULT;
		}
		break;
	case PCIE_DMA_SYNC_BUFFER_FOR_CPU:
		addr = obj->local_mem_start + msg.in.idx * obj->buffer_size;
		if (is_rc(obj))
			addr += obj->rd_buf_size;
		dma_sync_single_for_cpu(dev, addr, obj->buffer_size,
					DMA_FROM_DEVICE);
		break;
	case PCIE_DMA_WAIT_TRANSFER_COMPLETE:
		ret = wait_for_completion_interruptible(&obj->done);
		if (WARN_ON(ret)) {
			pr_info("failed to wait for completion\n");
			return ret;
		}

		obj->cache_time_avarage = obj->cache_time_total / obj->loop_count;

		pr_debug("cache_time: total = %lld, average = %lld, count = %d, size = 0x%x\n",
			 obj->cache_time_total, obj->cache_time_avarage,
			 obj->loop_count, obj->buffer_size);

		obj->cache_time_avarage = 0;
		obj->cache_time_total = 0;

		obj->loop_count = 0;
		break;
	case PCIE_DMA_SET_LOOP_COUNT:
		obj->loop_count_threshold = msg.count;
		pr_info("threshold = %d\n", obj->loop_count_threshold);
		break;
	case PCIE_DMA_GET_TOTAL_BUFFER_SIZE:
		msg_to_user.total_buffer_size = obj->local_mem_size;
		ret = copy_to_user(uarg, &msg_to_user, sizeof(msg));
		if (ret) {
			dev_err(dev, "failed to get total buffer size\n");
			return -EFAULT;
		}
		break;
	case PCIE_DMA_SET_BUFFER_SIZE:
		obj->buffer_size = msg.buffer_size;
		pr_debug("buffer_size = %d\n", obj->buffer_size);
		obj->rd_buf_size = obj->buffer_size * PCIE_DMA_BUF_CNT;
		obj->wr_buf_size = obj->buffer_size * PCIE_DMA_BUF_CNT;
		obj->ack_base = obj->rd_buf_size + obj->wr_buf_size;
		obj->set_data_check_pos = obj->buffer_size - 0x4;
		obj->set_local_idx_pos = obj->buffer_size - 0x8;
		obj->set_buf_size_pos = obj->buffer_size - 0xc;
		obj->set_chk_sum_pos = obj->buffer_size - 0x10;
		break;
	case PCIE_DMA_READ_FROM_REMOTE:
		pr_debug("read remote buffer %d into local buffer %d\n",
			 msg.in.r_widx, msg.in.l_widx);

		type = PCIE_DMA_READ_REMOTE;
		break;
	case PCIE_DMA_USER_SET_BUF_ADDR:
		/*
		 * If msg.local_addr is non-zero, use it as the local buffer;
		 * it must be a physically contiguous address. If it is zero,
		 * the local buffer comes from the DT reserved region. Either
		 * way the local buffer address is sent to the remote end so
		 * it knows where to place data. This case must complete
		 * before any data is sent.
		 */
		if (msg.local_addr) {
			pr_debug("local_addr = %pa\n", &msg.local_addr);
			addr_send_to_remote = (phys_addr_t)msg.local_addr;
			obj->local_mem_start = (phys_addr_t)msg.local_addr;
			/* Unmap the previous mapping */
			rk_pcie_unmap_kernel(obj->local_mem_base);
			/* Remap userspace's buffer into the kernel */
			obj->local_mem_base = rk_pcie_map_kernel(obj->local_mem_start,
					obj->buffer_size * PCIE_DMA_BUF_CNT * 2 + SZ_4K);
			if (!obj->local_mem_base)
				return -EFAULT;
		} else {
			addr_send_to_remote = obj->local_mem_start;
		}
		if (is_rc(obj))
			writel(addr_send_to_remote, obj->region_base);
		else
			writel(addr_send_to_remote, obj->region_base + 0x4);
		rk_pcie_send_addr_to_remote(obj);
		/* The first scan fires after 1 s */
		hrtimer_start(&obj->scan_timer,
			      ktime_set(0, 1 * 1000 * 1000 * 1000),
			      HRTIMER_MODE_REL);
		break;
	case PCIE_DMA_GET_BUFFER_SIZE:
		msg_to_user.buffer_size = obj->buffer_size;
		ret = copy_to_user(uarg, &msg_to_user, sizeof(msg));
		if (ret) {
			dev_err(dev, "failed to get buffer size\n");
			return -EFAULT;
		}
		break;
	default:
		pr_info("%s, %d, cmd 0x%x not supported\n", __func__, __LINE__,
			cmd);
		return -EFAULT;
	}

	if (cmd == PCIE_DMA_START || cmd == PCIE_DMA_READ_FROM_REMOTE ||
	    cmd == PCIE_DMA_FREE_LOCAL_READ_BUFFER_INDEX) {
		rk_pcie_prepare_dma(obj, msg.in.idx, msg.in.r_widx,
				    msg.in.l_widx, msg.in.size, type,
				    msg.in.chn);
		queue_work(obj->dma_trx_wq, &obj->dma_trx_work);
	}

	return 0;
}

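/*
 * POLLOUT when both a local and a remote write buffer are free;
 * POLLIN when received data is waiting to be consumed.
 */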
static unsigned int rk_pcie_misc_poll(struct file *filp,
				      poll_table *wait)
{
	struct dma_trx_obj *obj = filp->private_data;
	u32 lwa, rwa, lra;
	u32 ret = 0;

	poll_wait(filp, &obj->event_queue, wait);

	lwa = obj->local_write_available;
	rwa = obj->remote_write_available;
	if (lwa && rwa)
		ret = POLLOUT;

	lra = obj->local_read_available;
	if (lra)
		ret |= POLLIN;

	return ret;
}

static const struct file_operations rk_pcie_misc_fops = {
	.open		= rk_pcie_misc_open,
	.release	= rk_pcie_misc_release,
	.mmap		= rk_pcie_misc_mmap,
	.unlocked_ioctl	= rk_pcie_misc_ioctl,
	.poll		= rk_pcie_misc_poll,
};

static void rk_pcie_delete_misc(struct dma_trx_obj *obj)
{
	misc_deregister(&obj->pcie_dev->dev);
}

static int rk_pcie_add_misc(struct dma_trx_obj *obj)
{
	int ret;
	struct pcie_misc_dev *pcie_dev;

	pcie_dev = devm_kzalloc(obj->dev, sizeof(*pcie_dev), GFP_KERNEL);
	if (!pcie_dev)
		return -ENOMEM;

	pcie_dev->dev.minor = MISC_DYNAMIC_MINOR;
	pcie_dev->dev.name = "pcie-dev";
	pcie_dev->dev.fops = &rk_pcie_misc_fops;
	pcie_dev->dev.parent = NULL;

	ret = misc_register(&pcie_dev->dev);
	if (ret) {
		pr_err("pcie: failed to register misc device.\n");
		return ret;
	}

	pcie_dev->obj = obj;
	obj->pcie_dev = pcie_dev;

	pr_info("register misc device pcie-dev\n");

	return 0;
}

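/*
 * Map a physically contiguous region (DT reserved memory or a buffer
 * supplied via PCIE_DMA_USER_SET_BUF_ADDR) into the kernel as
 * uncached memory by collecting its pages and vmap()ing them.
 */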
static void *rk_pcie_map_kernel(phys_addr_t start, size_t len)
{
	int i;
	void *vaddr;
	pgprot_t pgprot;
	phys_addr_t phys;
	int npages = PAGE_ALIGN(len) / PAGE_SIZE;
	struct page **p = vmalloc(sizeof(struct page *) * npages);

	if (!p)
		return NULL;

	pgprot = pgprot_noncached(PAGE_KERNEL);

	phys = start;
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(phys);
		phys += PAGE_SIZE;
	}

	vaddr = vmap(p, npages, VM_MAP, pgprot);
	vfree(p);

	return vaddr;
}

static void rk_pcie_unmap_kernel(void *vaddr)
{
	vunmap(vaddr);
}

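/*
 * Each dma_table carries one queued transfer plus a small coherent
 * buffer (table->descs) holding the controller's transfer parameters.
 */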
static void rk_pcie_dma_table_free(struct dma_trx_obj *obj, int num)
{
	int i;
	struct dma_table *table;

	if (num > PCIE_DMA_TABLE_NUM)
		num = PCIE_DMA_TABLE_NUM;

	for (i = 0; i < num; i++) {
		table = obj->table[i];
		dma_free_coherent(obj->dev, PCIE_DMA_PARAM_SIZE,
				  table->descs, table->phys_descs);
		kfree(table);
	}
}

static int rk_pcie_dma_table_alloc(struct dma_trx_obj *obj)
{
	int i;
	struct dma_table *table;

	for (i = 0; i < PCIE_DMA_TABLE_NUM; i++) {
		table = kzalloc(sizeof(*table), GFP_KERNEL);
		if (!table)
			goto free_table;

		table->descs = dma_alloc_coherent(obj->dev, PCIE_DMA_PARAM_SIZE,
						  &table->phys_descs,
						  GFP_KERNEL | __GFP_ZERO);
		if (!table->descs) {
			kfree(table);
			goto free_table;
		}

		table->chn = PCIE_DMA_DEFAULT_CHN;
		INIT_LIST_HEAD(&table->tbl_node);
		obj->table[i] = table;
	}

	return 0;

free_table:
	rk_pcie_dma_table_free(obj, i);
	dev_err(obj->dev, "Failed to alloc dma table\n");

	return -ENOMEM;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) #ifdef CONFIG_DEBUG_FS
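/*
 * "pcie_trx" debugfs entry: reads dump the transfer state (last
 * direction, interrupt and loop counters, the lwa/rwa/lra availability
 * flags, table list and channel state); writes update the checksum
 * flag, see rk_pcie_debugfs_write() below. <linux/debugfs.h> and
 * <linux/seq_file.h> are assumed to be pulled in indirectly, as they
 * are not included at the top of this file.
 */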
static int rk_pcie_debugfs_trx_show(struct seq_file *s, void *v)
{
	struct dma_trx_obj *dma_obj = s->private;
	bool empty = list_empty(&dma_obj->tbl_list);

	seq_printf(s, "version = %x,", dma_obj->version);
	seq_printf(s, "last:%s,",
		   dma_obj->cur ? (dma_obj->cur->dir == DMA_FROM_BUS ? "read" : "write") : "no trx");
	seq_printf(s, "irq_num = %ld, loop_count = %d,",
		   dma_obj->irq_num, dma_obj->loop_count);
	seq_printf(s, "loop_threshold = %d,",
		   dma_obj->loop_count_threshold);
	seq_printf(s, "lwa = %lx, rwa = %lx, lra = %lx,",
		   dma_obj->local_write_available,
		   dma_obj->remote_write_available,
		   dma_obj->local_read_available);
	seq_printf(s, "list : (%s), dma chn : (%s)\n",
		   empty ? "empty" : "not empty",
		   dma_obj->dma_free ? "free" : "busy");

	return 0;
}

static int rk_pcie_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, rk_pcie_debugfs_trx_show, inode->i_private);
}

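/* Writing an integer updates the file-scope enable_check_sum flag. */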
static ssize_t rk_pcie_debugfs_write(struct file *file, const char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	int ret;

	ret = kstrtoint_from_user(user_buf, count, 0, &enable_check_sum);
	if (ret)
		return ret;

	return count;
}

static const struct file_operations rk_pcie_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = rk_pcie_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = rk_pcie_debugfs_write,
};
#endif

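/**
 * rk_pcie_dma_obj_probe - create the PCIe DMA transfer object for @dev
 * @dev: device whose OF node carries "busno", optional "reverse" and
 *	 the "memory-region"/"memory-region1" phandles
 *
 * Maps the two reserved regions (data buffers and the shared ack
 * region), allocates the descriptor tables, registers the misc device
 * and, when CONFIG_DEBUG_FS is set, the "pcie/pcie_trx" debugfs entry.
 *
 * Returns the object on success, an ERR_PTR() on failure.
 */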
struct dma_trx_obj *rk_pcie_dma_obj_probe(struct device *dev)
{
	int ret;
	u32 busno;
	struct device_node *np = dev->of_node;
	struct device_node *mem;
	struct resource reg;
	struct dma_trx_obj *obj;
	u32 reverse;

	obj = devm_kzalloc(dev, sizeof(struct dma_trx_obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->dev = dev;

	ret = of_property_read_u32(np, "busno", &busno);
	if (ret < 0) {
		dev_err(dev, "missing \"busno\" property\n");
		return ERR_PTR(ret);
	}

	obj->busno = busno;

	ret = of_property_read_u32(np, "reverse", &reverse);
	if (ret < 0)
		obj->addr_reverse = 0;
	else
		obj->addr_reverse = reverse;

	mem = of_parse_phandle(np, "memory-region", 0);
	if (!mem) {
		dev_err(dev, "missing \"memory-region\" property\n");
		return ERR_PTR(-ENODEV);
	}

	ret = of_address_to_resource(mem, 0, &reg);
	of_node_put(mem);
	if (ret < 0) {
		dev_err(dev, "failed to get \"memory-region\" resource\n");
		return ERR_PTR(-ENODEV);
	}

	obj->local_mem_start = reg.start;
	obj->local_mem_size = resource_size(&reg);
	obj->local_mem_base = rk_pcie_map_kernel(obj->local_mem_start,
						 obj->local_mem_size);
	if (!obj->local_mem_base)
		return ERR_PTR(-ENOMEM);

	mem = of_parse_phandle(np, "memory-region1", 0);
	if (!mem) {
		dev_err(dev, "missing \"memory-region1\" property\n");
		ret = -ENODEV;
		goto unmap_local_mem_region;
	}

	ret = of_address_to_resource(mem, 0, &reg);
	of_node_put(mem);
	if (ret < 0) {
		dev_err(dev, "failed to get \"memory-region1\" resource\n");
		ret = -ENODEV;
		goto unmap_local_mem_region;
	}

	obj->region_start = reg.start;
	obj->region_size = resource_size(&reg);
	obj->region_base = rk_pcie_map_kernel(obj->region_start,
					      obj->region_size);
	if (!obj->region_base) {
		dev_err(dev, "mapping region_base error\n");
		ret = -ENOMEM;
		goto unmap_local_mem_region;
	}
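
	/*
	 * Clear this side's flag in the shared ack region before use:
	 * the EP uses the word at offset 0x0, the RC the word at 0x4.
	 */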
	if (!is_rc(obj))
		writel(0x0, obj->region_base);
	else
		writel(0x0, obj->region_base + 0x4);

	ret = rk_pcie_dma_table_alloc(obj);
	if (ret) {
		dev_err(dev, "rk_pcie_dma_table_alloc error\n");
		ret = -ENOMEM;
		goto unmap_region;
	}
	obj->dma_trx_wq = create_singlethread_workqueue("dma_trx_wq");
	if (!obj->dma_trx_wq) {
		dev_err(dev, "failed to create dma_trx_wq\n");
		ret = -ENOMEM;
		goto free_dma_table;
	}
	INIT_WORK(&obj->dma_trx_work, rk_pcie_dma_trx_work);

	INIT_LIST_HEAD(&obj->tbl_list);
	spin_lock_init(&obj->tbl_list_lock);

	init_waitqueue_head(&obj->event_queue);

	hrtimer_init(&obj->scan_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	obj->scan_timer.function = rk_pcie_scan_timer;
	obj->irq_num = 0;
	obj->loop_count_threshold = 0;
	obj->ref_count = 0;
	obj->version = 0x4;
	init_completion(&obj->done);
	obj->cb = rk_pcie_handle_dma_interrupt;

	mutex_init(&obj->count_mutex);
	rk_pcie_add_misc(obj);

#ifdef CONFIG_DEBUG_FS
	obj->pcie_root = debugfs_create_dir("pcie", NULL);
	if (IS_ERR(obj->pcie_root)) {
		destroy_workqueue(obj->dma_trx_wq);
		ret = -EINVAL;
		goto free_dma_table;
	}

	debugfs_create_file("pcie_trx", 0644, obj->pcie_root, obj,
			    &rk_pcie_debugfs_fops);
#endif

	return obj;
free_dma_table:
	rk_pcie_dma_table_free(obj, PCIE_DMA_TABLE_NUM);
unmap_region:
	rk_pcie_unmap_kernel(obj->region_base);
unmap_local_mem_region:
	rk_pcie_unmap_kernel(obj->local_mem_base);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(rk_pcie_dma_obj_probe);

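/**
 * rk_pcie_dma_obj_remove - tear down an object created by
 *			    rk_pcie_dma_obj_probe()
 * @obj: the transfer object
 *
 * Cancels the scan timer, unregisters the misc device, destroys the
 * workqueue before its descriptor tables are freed, and unmaps both
 * reserved memory regions.
 */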
void rk_pcie_dma_obj_remove(struct dma_trx_obj *obj)
{
	hrtimer_cancel(&obj->scan_timer);
	rk_pcie_delete_misc(obj);
	destroy_workqueue(obj->dma_trx_wq);
	rk_pcie_dma_table_free(obj, PCIE_DMA_TABLE_NUM);
	rk_pcie_unmap_kernel(obj->region_base);
	rk_pcie_unmap_kernel(obj->local_mem_base);

#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(obj->pcie_root);
#endif
}
EXPORT_SYMBOL_GPL(rk_pcie_dma_obj_remove);