// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Filename: dma.c
 *
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *          Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 */

#include <linux/slab.h>
#include "rsxx_priv.h"

struct rsxx_dma {
        struct list_head list;
        u8 cmd;
        unsigned int laddr;     /* Logical address */
        struct {
                u32 off;
                u32 cnt;
        } sub_page;
        dma_addr_t dma_addr;
        struct page *page;
        unsigned int pg_off;    /* Page Offset */
        rsxx_dma_cb cb;
        void *cb_data;
};

/* This timeout is used to detect a stalled DMA channel */
#define DMA_ACTIVITY_TIMEOUT msecs_to_jiffies(10000)

struct hw_status {
        u8 status;
        u8 tag;
        __le16 count;
        __le32 _rsvd2;
        __le64 _rsvd3;
} __packed;

enum rsxx_dma_status {
        DMA_SW_ERR    = 0x1,
        DMA_HW_FAULT  = 0x2,
        DMA_CANCELLED = 0x4,
};

struct hw_cmd {
        u8 command;
        u8 tag;
        u8 _rsvd;
        u8 sub_page; /* Bit[0:2]: 512byte offset */
                     /* Bit[4:6]: 512byte count */
        __le32 device_addr;
        __le64 host_addr;
} __packed;
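
/*
 * The sub_page byte packs the 512-byte offset and count within a 4KB
 * hardware block, as encoded in rsxx_issue_dmas():
 *     sub_page = ((cnt & 0x7) << 4) | (off & 0x7)
 * e.g. a transfer of two 512-byte sectors starting 512 bytes into the
 * block (off = 1, cnt = 2) encodes as 0x21. rsxx_queue_discard() leaves
 * the count at zero; get_dma_size() treats a zero count as a full
 * hardware block.
 */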

enum rsxx_hw_cmd {
        HW_CMD_BLK_DISCARD      = 0x70,
        HW_CMD_BLK_WRITE        = 0x80,
        HW_CMD_BLK_READ         = 0xC0,
        HW_CMD_BLK_RECON_READ   = 0xE0,
};

enum rsxx_hw_status {
        HW_STATUS_CRC           = 0x01,
        HW_STATUS_HARD_ERR      = 0x02,
        HW_STATUS_SOFT_ERR      = 0x04,
        HW_STATUS_FAULT         = 0x08,
};

static struct kmem_cache *rsxx_dma_pool;

struct dma_tracker {
        int next_tag;
        struct rsxx_dma *dma;
};

#define DMA_TRACKER_LIST_SIZE8 (sizeof(struct dma_tracker_list) + \
                (sizeof(struct dma_tracker) * RSXX_MAX_OUTSTANDING_CMDS))

struct dma_tracker_list {
        spinlock_t lock;
        int head;
        struct dma_tracker list[];
};


/*----------------- Misc Utility Functions -------------------*/
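/*
 * Card byte addresses are striped across the DMA targets. The helpers
 * below split a byte address (addr8) into its two pieces: the target
 * (channel) that owns it, and the per-target logical block address the
 * hardware expects. The stripe masks and shifts used here are derived
 * in rsxx_dma_stripe_setup().
 */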
static unsigned int rsxx_addr8_to_laddr(u64 addr8, struct rsxx_cardinfo *card)
{
        unsigned long long tgt_addr8;

        tgt_addr8 = ((addr8 >> card->_stripe.upper_shift) &
                     card->_stripe.upper_mask) |
                    ((addr8) & card->_stripe.lower_mask);
        do_div(tgt_addr8, RSXX_HW_BLK_SIZE);
        return tgt_addr8;
}

static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
{
        unsigned int tgt;

        tgt = (addr8 >> card->_stripe.target_shift) & card->_stripe.target_mask;

        return tgt;
}

void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
{
        /* Reset all DMA Command/Status Queues */
        iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
}

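/*
 * A sub-page count of zero (as queued by rsxx_queue_discard()) means the
 * DMA covers a whole hardware block; otherwise the size is the number of
 * 512-byte sectors recorded in the sub-page descriptor.
 */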
static unsigned int get_dma_size(struct rsxx_dma *dma)
{
        if (dma->sub_page.cnt)
                return dma->sub_page.cnt << 9;
        else
                return RSXX_HW_BLK_SIZE;
}


/*----------------- DMA Tracker -------------------*/
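/*
 * The tracker list is a simple free list of hardware command tags. Each
 * entry either sits on the free list (linked through next_tag, with the
 * head in trackers->head and -1 marking the end) or, once popped,
 * records which rsxx_dma is outstanding under that tag so the
 * completion path can find it again.
 */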
static void set_tracker_dma(struct dma_tracker_list *trackers,
                            int tag,
                            struct rsxx_dma *dma)
{
        trackers->list[tag].dma = dma;
}

static struct rsxx_dma *get_tracker_dma(struct dma_tracker_list *trackers,
                                        int tag)
{
        return trackers->list[tag].dma;
}

static int pop_tracker(struct dma_tracker_list *trackers)
{
        int tag;

        spin_lock(&trackers->lock);
        tag = trackers->head;
        if (tag != -1) {
                trackers->head = trackers->list[tag].next_tag;
                trackers->list[tag].next_tag = -1;
        }
        spin_unlock(&trackers->lock);

        return tag;
}

static void push_tracker(struct dma_tracker_list *trackers, int tag)
{
        spin_lock(&trackers->lock);
        trackers->list[tag].next_tag = trackers->head;
        trackers->head = tag;
        trackers->list[tag].dma = NULL;
        spin_unlock(&trackers->lock);
}


/*----------------- Interrupt Coalescing -------------*/
/*
 * Interrupt Coalescing Register Format:
 *	Interrupt Timer (64ns units) [15:0]
 *	Interrupt Count [24:16]
 *	Reserved [31:25]
 */
#define INTR_COAL_LATENCY_MASK (0x0000ffff)

#define INTR_COAL_COUNT_SHIFT 16
#define INTR_COAL_COUNT_BITS 9
#define INTR_COAL_COUNT_MASK (((1 << INTR_COAL_COUNT_BITS) - 1) << \
                                INTR_COAL_COUNT_SHIFT)
#define INTR_COAL_LATENCY_UNITS_NS 64

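/*
 * Example encoding (illustrative values, not taken from hardware): a
 * coalescing count of 32 completions with a 4096 ns latency target
 * encodes as
 *     (32 << 16) | (4096 / 64) = 0x00200040
 * A disabled mode always encodes as 0, since dma_intr_coal_val()
 * returns early in that case.
 */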

static u32 dma_intr_coal_val(u32 mode, u32 count, u32 latency)
{
        u32 latency_units = latency / INTR_COAL_LATENCY_UNITS_NS;

        if (mode == RSXX_INTR_COAL_DISABLED)
                return 0;

        return ((count << INTR_COAL_COUNT_SHIFT) & INTR_COAL_COUNT_MASK) |
               (latency_units & INTR_COAL_LATENCY_MASK);

}

static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
{
        int i;
        u32 q_depth = 0;
        u32 intr_coal;

        if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE ||
            unlikely(card->eeh_state))
                return;

        for (i = 0; i < card->n_targets; i++)
                q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth);

        intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
                                      q_depth / 2,
                                      card->config.data.intr_coal.latency);
        iowrite32(intr_coal, card->regmap + INTR_COAL);
}

/*----------------- RSXX DMA Handling -------------------*/
static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
{
        if (dma->cmd != HW_CMD_BLK_DISCARD) {
                if (!dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
                        dma_unmap_page(&ctrl->card->dev->dev, dma->dma_addr,
                                       get_dma_size(dma),
                                       dma->cmd == HW_CMD_BLK_WRITE ?
                                       DMA_TO_DEVICE :
                                       DMA_FROM_DEVICE);
                }
        }

        kmem_cache_free(rsxx_dma_pool, dma);
}

static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
                              struct rsxx_dma *dma,
                              unsigned int status)
{
        if (status & DMA_SW_ERR)
                ctrl->stats.dma_sw_err++;
        if (status & DMA_HW_FAULT)
                ctrl->stats.dma_hw_fault++;
        if (status & DMA_CANCELLED)
                ctrl->stats.dma_cancelled++;

        if (dma->cb)
                dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);

        rsxx_free_dma(ctrl, dma);
}

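/*
 * Remove every DMA from @q. With COMPLETE_DMA the DMAs are completed to
 * their callers with DMA_CANCELLED; with FREE_DMA they are simply freed
 * without notifying anyone. Returns the number of DMAs removed.
 * Locking of the list is left to the caller.
 */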
int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
                           struct list_head *q, unsigned int done)
{
        struct rsxx_dma *dma;
        struct rsxx_dma *tmp;
        int cnt = 0;

        list_for_each_entry_safe(dma, tmp, q, list) {
                list_del(&dma->list);
                if (done & COMPLETE_DMA)
                        rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
                else
                        rsxx_free_dma(ctrl, dma);
                cnt++;
        }

        return cnt;
}

static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
                             struct rsxx_dma *dma)
{
        /*
         * Requeued DMAs go to the front of the queue so they are issued
         * first.
         */
        spin_lock_bh(&ctrl->queue_lock);
        ctrl->stats.sw_q_depth++;
        list_add(&dma->list, &ctrl->queue);
        spin_unlock_bh(&ctrl->queue_lock);
}

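/*
 * Decide what to do with a DMA that the hardware flagged as failed. CRC
 * and hard errors on a plain read are retried as a reconstructive read
 * when the card allows scrubbing (scrub_hard); otherwise the DMA is
 * completed back to its caller with the accumulated error status.
 */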
static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
                                  struct rsxx_dma *dma,
                                  u8 hw_st)
{
        unsigned int status = 0;
        int requeue_cmd = 0;

        dev_dbg(CARD_TO_DEV(ctrl->card),
                "Handling DMA error(cmd x%02x, laddr x%08x st:x%02x)\n",
                dma->cmd, dma->laddr, hw_st);

        if (hw_st & HW_STATUS_CRC)
                ctrl->stats.crc_errors++;
        if (hw_st & HW_STATUS_HARD_ERR)
                ctrl->stats.hard_errors++;
        if (hw_st & HW_STATUS_SOFT_ERR)
                ctrl->stats.soft_errors++;

        switch (dma->cmd) {
        case HW_CMD_BLK_READ:
                if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
                        if (ctrl->card->scrub_hard) {
                                dma->cmd = HW_CMD_BLK_RECON_READ;
                                requeue_cmd = 1;
                                ctrl->stats.reads_retried++;
                        } else {
                                status |= DMA_HW_FAULT;
                                ctrl->stats.reads_failed++;
                        }
                } else if (hw_st & HW_STATUS_FAULT) {
                        status |= DMA_HW_FAULT;
                        ctrl->stats.reads_failed++;
                }

                break;
        case HW_CMD_BLK_RECON_READ:
                if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
                        /* Data could not be reconstructed. */
                        status |= DMA_HW_FAULT;
                        ctrl->stats.reads_failed++;
                }

                break;
        case HW_CMD_BLK_WRITE:
                status |= DMA_HW_FAULT;
                ctrl->stats.writes_failed++;

                break;
        case HW_CMD_BLK_DISCARD:
                status |= DMA_HW_FAULT;
                ctrl->stats.discards_failed++;

                break;
        default:
                dev_err(CARD_TO_DEV(ctrl->card),
                        "Unknown command in DMA!(cmd: x%02x laddr x%08x st: x%02x)\n",
                        dma->cmd, dma->laddr, hw_st);
                status |= DMA_SW_ERR;

                break;
        }

        if (requeue_cmd)
                rsxx_requeue_dma(ctrl, dma);
        else
                rsxx_complete_dma(ctrl, dma, status);
}

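/*
 * Activity timer callback. If commands are still outstanding when the
 * timer fires, either the SW_CMD_IDX doorbell write went missing (in
 * which case it is rewritten and the timer re-armed) or the channel has
 * genuinely stalled, in which case the interface is faulted and all
 * queued and in-flight DMAs are cancelled.
 */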
static void dma_engine_stalled(struct timer_list *t)
{
        struct rsxx_dma_ctrl *ctrl = from_timer(ctrl, t, activity_timer);
        int cnt;

        if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
            unlikely(ctrl->card->eeh_state))
                return;

        if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
                /*
                 * The dma engine was stalled because the SW_CMD_IDX write
                 * was lost. Issue it again to recover.
                 */
                dev_warn(CARD_TO_DEV(ctrl->card),
                         "SW_CMD_IDX write was lost, re-writing...\n");
                iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
                mod_timer(&ctrl->activity_timer,
                          jiffies + DMA_ACTIVITY_TIMEOUT);
        } else {
                dev_warn(CARD_TO_DEV(ctrl->card),
                         "DMA channel %d has stalled, faulting interface.\n",
                         ctrl->id);
                ctrl->card->dma_fault = 1;

                /* Clean up the DMA queue */
                spin_lock(&ctrl->queue_lock);
                cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
                spin_unlock(&ctrl->queue_lock);

                cnt += rsxx_dma_cancel(ctrl);

                if (cnt)
                        dev_info(CARD_TO_DEV(ctrl->card),
                                 "Freed %d queued DMAs on channel %d\n",
                                 cnt, ctrl->id);
        }
}

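/*
 * Drain the software queue into the hardware command ring: pop a free
 * tag, take the next DMA off the queue, map its page for the device (a
 * discard has no data to map), fill in the next hw_cmd slot, and once
 * the batch is built publish the new command index through SW_CMD_IDX
 * to kick the hardware.
 */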
static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
{
        struct rsxx_dma *dma;
        int tag;
        int cmds_pending = 0;
        struct hw_cmd *hw_cmd_buf;
        int dir;

        hw_cmd_buf = ctrl->cmd.buf;

        if (unlikely(ctrl->card->halt) ||
            unlikely(ctrl->card->eeh_state))
                return;

        while (1) {
                spin_lock_bh(&ctrl->queue_lock);
                if (list_empty(&ctrl->queue)) {
                        spin_unlock_bh(&ctrl->queue_lock);
                        break;
                }
                spin_unlock_bh(&ctrl->queue_lock);

                tag = pop_tracker(ctrl->trackers);
                if (tag == -1)
                        break;

                spin_lock_bh(&ctrl->queue_lock);
                dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
                list_del(&dma->list);
                ctrl->stats.sw_q_depth--;
                spin_unlock_bh(&ctrl->queue_lock);

                /*
                 * This will catch any DMAs that slipped in right before the
                 * fault, but were queued after all the other DMAs were
                 * cancelled.
                 */
                if (unlikely(ctrl->card->dma_fault)) {
                        push_tracker(ctrl->trackers, tag);
                        rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
                        continue;
                }

                if (dma->cmd != HW_CMD_BLK_DISCARD) {
                        if (dma->cmd == HW_CMD_BLK_WRITE)
                                dir = DMA_TO_DEVICE;
                        else
                                dir = DMA_FROM_DEVICE;

                        /*
                         * The function dma_map_page is placed here because we
                         * can only, by design, issue up to 255 commands to the
                         * hardware at one time per DMA channel. So the maximum
                         * amount of mapped memory would be 255 * 4 channels *
                         * 4096 Bytes which is less than 2GB, the limit of a x8
                         * Non-HWWD PCIe slot. This way the dma_map_page
                         * function should never fail because of a lack of
                         * mappable memory.
                         */
                        dma->dma_addr = dma_map_page(&ctrl->card->dev->dev, dma->page,
                                        dma->pg_off, dma->sub_page.cnt << 9, dir);
                        if (dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
                                push_tracker(ctrl->trackers, tag);
                                rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
                                continue;
                        }
                }

                set_tracker_dma(ctrl->trackers, tag, dma);
                hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd;
                hw_cmd_buf[ctrl->cmd.idx].tag = tag;
                hw_cmd_buf[ctrl->cmd.idx]._rsvd = 0;
                hw_cmd_buf[ctrl->cmd.idx].sub_page =
                                ((dma->sub_page.cnt & 0x7) << 4) |
                                (dma->sub_page.off & 0x7);

                hw_cmd_buf[ctrl->cmd.idx].device_addr =
                                cpu_to_le32(dma->laddr);

                hw_cmd_buf[ctrl->cmd.idx].host_addr =
                                cpu_to_le64(dma->dma_addr);

                dev_dbg(CARD_TO_DEV(ctrl->card),
                        "Issue DMA%d(laddr %d tag %d) to idx %d\n",
                        ctrl->id, dma->laddr, tag, ctrl->cmd.idx);

                ctrl->cmd.idx = (ctrl->cmd.idx + 1) & RSXX_CS_IDX_MASK;
                cmds_pending++;

                if (dma->cmd == HW_CMD_BLK_WRITE)
                        ctrl->stats.writes_issued++;
                else if (dma->cmd == HW_CMD_BLK_DISCARD)
                        ctrl->stats.discards_issued++;
                else
                        ctrl->stats.reads_issued++;
        }

        /* Let HW know we've queued commands. */
        if (cmds_pending) {
                atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
                mod_timer(&ctrl->activity_timer,
                          jiffies + DMA_ACTIVITY_TIMEOUT);

                if (unlikely(ctrl->card->eeh_state)) {
                        del_timer_sync(&ctrl->activity_timer);
                        return;
                }

                iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
        }
}

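/*
 * Completion handler, run from the dma_done work item. The hardware
 * writes back status entries in order; an entry is valid once its count
 * field matches the driver's running event count (e_cnt), so the loop
 * consumes entries until the counts stop matching, completing each DMA
 * found through the tag tracker and returning its tag to the free list.
 */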
static void rsxx_dma_done(struct rsxx_dma_ctrl *ctrl)
{
        struct rsxx_dma *dma;
        unsigned long flags;
        u16 count;
        u8 status;
        u8 tag;
        struct hw_status *hw_st_buf;

        hw_st_buf = ctrl->status.buf;

        if (unlikely(ctrl->card->halt) ||
            unlikely(ctrl->card->dma_fault) ||
            unlikely(ctrl->card->eeh_state))
                return;

        count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);

        while (count == ctrl->e_cnt) {
                /*
                 * The read memory-barrier is necessary to keep aggressive
                 * processors/optimizers (such as the PPC Apple G5) from
                 * reordering the following status-buffer tag & status read
                 * *before* the count read on subsequent iterations of the
                 * loop!
                 */
                rmb();

                status = hw_st_buf[ctrl->status.idx].status;
                tag = hw_st_buf[ctrl->status.idx].tag;

                dma = get_tracker_dma(ctrl->trackers, tag);
                if (dma == NULL) {
                        spin_lock_irqsave(&ctrl->card->irq_lock, flags);
                        rsxx_disable_ier(ctrl->card, CR_INTR_DMA_ALL);
                        spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);

                        dev_err(CARD_TO_DEV(ctrl->card),
                                "No tracker for tag %d "
                                "(idx %d id %d)\n",
                                tag, ctrl->status.idx, ctrl->id);
                        return;
                }

                dev_dbg(CARD_TO_DEV(ctrl->card),
                        "Completing DMA%d"
                        "(laddr x%x tag %d st: x%x cnt: x%04x) from idx %d.\n",
                        ctrl->id, dma->laddr, tag, status, count,
                        ctrl->status.idx);

                atomic_dec(&ctrl->stats.hw_q_depth);

                mod_timer(&ctrl->activity_timer,
                          jiffies + DMA_ACTIVITY_TIMEOUT);

                if (status)
                        rsxx_handle_dma_error(ctrl, dma, status);
                else
                        rsxx_complete_dma(ctrl, dma, 0);

                push_tracker(ctrl->trackers, tag);

                ctrl->status.idx = (ctrl->status.idx + 1) &
                                   RSXX_CS_IDX_MASK;
                ctrl->e_cnt++;

                count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
        }

        dma_intr_coal_auto_tune(ctrl->card);

        if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
                del_timer_sync(&ctrl->activity_timer);

        spin_lock_irqsave(&ctrl->card->irq_lock, flags);
        rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
        spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);

        spin_lock_bh(&ctrl->queue_lock);
        if (ctrl->stats.sw_q_depth)
                queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
        spin_unlock_bh(&ctrl->queue_lock);
}

static void rsxx_schedule_issue(struct work_struct *work)
{
        struct rsxx_dma_ctrl *ctrl;

        ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);

        mutex_lock(&ctrl->work_lock);
        rsxx_issue_dmas(ctrl);
        mutex_unlock(&ctrl->work_lock);
}

static void rsxx_schedule_done(struct work_struct *work)
{
        struct rsxx_dma_ctrl *ctrl;

        ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);

        mutex_lock(&ctrl->work_lock);
        rsxx_dma_done(ctrl);
        mutex_unlock(&ctrl->work_lock);
}

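/*
 * Build a discard DMA for one hardware block. Discards carry no data
 * payload, so no page is attached and nothing gets DMA-mapped; the
 * logical address is the only thing the hardware needs.
 */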
static blk_status_t rsxx_queue_discard(struct rsxx_cardinfo *card,
                                       struct list_head *q,
                                       unsigned int laddr,
                                       rsxx_dma_cb cb,
                                       void *cb_data)
{
        struct rsxx_dma *dma;

        dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
        if (!dma)
                return BLK_STS_RESOURCE;

        dma->cmd = HW_CMD_BLK_DISCARD;
        dma->laddr = laddr;
        dma->dma_addr = 0;
        dma->sub_page.off = 0;
        dma->sub_page.cnt = 0;
        dma->page = NULL;
        dma->pg_off = 0;
        dma->cb = cb;
        dma->cb_data = cb_data;

        dev_dbg(CARD_TO_DEV(card), "Queuing[D] laddr %x\n", dma->laddr);

        list_add_tail(&dma->list, q);

        return 0;
}

static blk_status_t rsxx_queue_dma(struct rsxx_cardinfo *card,
                                   struct list_head *q,
                                   int dir,
                                   unsigned int dma_off,
                                   unsigned int dma_len,
                                   unsigned int laddr,
                                   struct page *page,
                                   unsigned int pg_off,
                                   rsxx_dma_cb cb,
                                   void *cb_data)
{
        struct rsxx_dma *dma;

        dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
        if (!dma)
                return BLK_STS_RESOURCE;

        dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
        dma->laddr = laddr;
        dma->sub_page.off = (dma_off >> 9);
        dma->sub_page.cnt = (dma_len >> 9);
        dma->page = page;
        dma->pg_off = pg_off;
        dma->cb = cb;
        dma->cb_data = cb_data;

        dev_dbg(CARD_TO_DEV(card),
                "Queuing[%c] laddr %x off %d cnt %d page %p pg_off %d\n",
                dir ? 'W' : 'R', dma->laddr, dma->sub_page.off,
                dma->sub_page.cnt, dma->page, dma->pg_off);

        /* Queue the DMA */
        list_add_tail(&dma->list, q);

        return 0;
}

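/*
 * Split a bio into per-target DMA descriptors. Each piece is clamped so
 * it never crosses a hardware block boundary, routed to the target that
 * owns its address, and collected on a per-target list; the lists are
 * then spliced onto the per-channel queues in one go and the issue
 * workers are kicked. n_dmas is set to the total number of descriptors
 * queued, so the caller knows how many completions to expect.
 */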
blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                                struct bio *bio,
                                atomic_t *n_dmas,
                                rsxx_dma_cb cb,
                                void *cb_data)
{
        struct list_head dma_list[RSXX_MAX_TARGETS];
        struct bio_vec bvec;
        struct bvec_iter iter;
        unsigned long long addr8;
        unsigned int laddr;
        unsigned int bv_len;
        unsigned int bv_off;
        unsigned int dma_off;
        unsigned int dma_len;
        int dma_cnt[RSXX_MAX_TARGETS];
        int tgt;
        blk_status_t st;
        int i;

        addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
        atomic_set(n_dmas, 0);

        for (i = 0; i < card->n_targets; i++) {
                INIT_LIST_HEAD(&dma_list[i]);
                dma_cnt[i] = 0;
        }

        if (bio_op(bio) == REQ_OP_DISCARD) {
                bv_len = bio->bi_iter.bi_size;

                while (bv_len > 0) {
                        tgt = rsxx_get_dma_tgt(card, addr8);
                        laddr = rsxx_addr8_to_laddr(addr8, card);

                        st = rsxx_queue_discard(card, &dma_list[tgt], laddr,
                                                cb, cb_data);
                        if (st)
                                goto bvec_err;

                        dma_cnt[tgt]++;
                        atomic_inc(n_dmas);
                        addr8 += RSXX_HW_BLK_SIZE;
                        bv_len -= RSXX_HW_BLK_SIZE;
                }
        } else {
                bio_for_each_segment(bvec, bio, iter) {
                        bv_len = bvec.bv_len;
                        bv_off = bvec.bv_offset;

                        while (bv_len > 0) {
                                tgt = rsxx_get_dma_tgt(card, addr8);
                                laddr = rsxx_addr8_to_laddr(addr8, card);
                                dma_off = addr8 & RSXX_HW_BLK_MASK;
                                dma_len = min(bv_len,
                                              RSXX_HW_BLK_SIZE - dma_off);

                                st = rsxx_queue_dma(card, &dma_list[tgt],
                                                    bio_data_dir(bio),
                                                    dma_off, dma_len,
                                                    laddr, bvec.bv_page,
                                                    bv_off, cb, cb_data);
                                if (st)
                                        goto bvec_err;

                                dma_cnt[tgt]++;
                                atomic_inc(n_dmas);
                                addr8 += dma_len;
                                bv_off += dma_len;
                                bv_len -= dma_len;
                        }
                }
        }

        for (i = 0; i < card->n_targets; i++) {
                if (!list_empty(&dma_list[i])) {
                        spin_lock_bh(&card->ctrl[i].queue_lock);
                        card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
                        list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
                        spin_unlock_bh(&card->ctrl[i].queue_lock);

                        queue_work(card->ctrl[i].issue_wq,
                                   &card->ctrl[i].issue_dma_work);
                }
        }

        return 0;

bvec_err:
        for (i = 0; i < card->n_targets; i++)
                rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
                                       FREE_DMA);
        return st;
}


/*----------------- DMA Engine Initialization & Setup -------------------*/
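/*
 * Allocate the coherent status and command rings for one channel,
 * program their bus addresses into the card, and resynchronize the
 * driver's indices with whatever the hardware currently reports. The
 * buffers are filled with distinctive patterns (0xac / 0x83), likely so
 * stale entries are easy to spot when debugging.
 */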
int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
{
        ctrl->status.buf = dma_alloc_coherent(&dev->dev, STATUS_BUFFER_SIZE8,
                                &ctrl->status.dma_addr, GFP_KERNEL);
        ctrl->cmd.buf = dma_alloc_coherent(&dev->dev, COMMAND_BUFFER_SIZE8,
                                &ctrl->cmd.dma_addr, GFP_KERNEL);
        if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
                return -ENOMEM;

        memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
        iowrite32(lower_32_bits(ctrl->status.dma_addr),
                  ctrl->regmap + SB_ADD_LO);
        iowrite32(upper_32_bits(ctrl->status.dma_addr),
                  ctrl->regmap + SB_ADD_HI);

        memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
        iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
        iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);

        ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
        if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
                dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
                         ctrl->status.idx);
                return -EINVAL;
        }
        iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
        iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);

        ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
        if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
                dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
                         ctrl->cmd.idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) static int rsxx_dma_ctrl_init(struct pci_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) struct rsxx_dma_ctrl *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) int st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) memset(&ctrl->stats, 0, sizeof(ctrl->stats));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if (!ctrl->trackers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) ctrl->trackers->head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) ctrl->trackers->list[i].next_tag = i + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) ctrl->trackers->list[i].dma = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) ctrl->trackers->list[RSXX_MAX_OUTSTANDING_CMDS-1].next_tag = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) spin_lock_init(&ctrl->trackers->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) spin_lock_init(&ctrl->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) mutex_init(&ctrl->work_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) INIT_LIST_HEAD(&ctrl->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) timer_setup(&ctrl->activity_timer, dma_engine_stalled, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (!ctrl->issue_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) ctrl->done_wq = alloc_ordered_workqueue(DRIVER_NAME"_done", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (!ctrl->done_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) INIT_WORK(&ctrl->issue_dma_work, rsxx_schedule_issue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) INIT_WORK(&ctrl->dma_done_work, rsxx_schedule_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) st = rsxx_hw_buffers_init(dev, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (st)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
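/*
 * Pre-compute the masks and shifts used to stripe logical addresses
 * across the card's DMA targets. stripe_size8 must be a power of two.
 */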
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) unsigned int stripe_size8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (!is_power_of_2(stripe_size8)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) dev_err(CARD_TO_DEV(card),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) "stripe_size is NOT a power of 2!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) card->_stripe.lower_mask = stripe_size8 - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) card->_stripe.upper_mask = ~(card->_stripe.lower_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) card->_stripe.upper_shift = ffs(card->n_targets) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) card->_stripe.target_mask = card->n_targets - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) card->_stripe.target_shift = ffs(stripe_size8) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) dev_dbg(CARD_TO_DEV(card), "_stripe.lower_mask = x%016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) card->_stripe.lower_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) dev_dbg(CARD_TO_DEV(card), "_stripe.upper_shift = x%016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) card->_stripe.upper_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) dev_dbg(CARD_TO_DEV(card), "_stripe.upper_mask = x%016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) card->_stripe.upper_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) dev_dbg(CARD_TO_DEV(card), "_stripe.target_mask = x%016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) card->_stripe.target_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) dev_dbg(CARD_TO_DEV(card), "_stripe.target_shift = x%016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) card->_stripe.target_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
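/*
 * Program the interrupt coalescing register from the saved card
 * configuration, then set up address striping.
 */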
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) int rsxx_dma_configure(struct rsxx_cardinfo *card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) u32 intr_coal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) card->config.data.intr_coal.count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) card->config.data.intr_coal.latency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) iowrite32(intr_coal, card->regmap + INTR_COAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) return rsxx_dma_stripe_setup(card, card->config.data.stripe_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
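/*
 * Top-level DMA initialization: carve the register map into per-channel
 * 4K windows, reset the DMA queues, initialize each channel controller,
 * apply the card configuration if it is valid, and finally enable the
 * per-channel DMA interrupts.
 */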
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) int rsxx_dma_setup(struct rsxx_cardinfo *card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) int st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) dev_info(CARD_TO_DEV(card),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) "Initializing %d DMA targets\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) card->n_targets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
	/* The regmap is divided into 4K chunks, one per DMA channel. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) for (i = 0; i < card->n_targets; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) card->ctrl[i].regmap = card->regmap + (i * 4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) card->dma_fault = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) /* Reset the DMA queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) rsxx_dma_queue_reset(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) /************* Setup DMA Control *************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) for (i = 0; i < card->n_targets; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) st = rsxx_dma_ctrl_init(card->dev, &card->ctrl[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (st)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) goto failed_dma_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) card->ctrl[i].card = card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) card->ctrl[i].id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) card->scrub_hard = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (card->config_valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) rsxx_dma_configure(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) /* Enable the interrupts after all setup has completed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) for (i = 0; i < card->n_targets; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) spin_lock_irqsave(&card->irq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) rsxx_enable_ier_and_isr(card, CR_INTR_DMA(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) spin_unlock_irqrestore(&card->irq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
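/* Unwind everything that was set up before the failure. */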
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) failed_dma_setup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) for (i = 0; i < card->n_targets; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) struct rsxx_dma_ctrl *ctrl = &card->ctrl[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (ctrl->issue_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) destroy_workqueue(ctrl->issue_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) ctrl->issue_wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (ctrl->done_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) destroy_workqueue(ctrl->done_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) ctrl->done_wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
		vfree(ctrl->trackers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (ctrl->status.buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) ctrl->status.buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) ctrl->status.dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (ctrl->cmd.buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) dma_free_coherent(&card->dev->dev, COMMAND_BUFFER_SIZE8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) ctrl->cmd.buf, ctrl->cmd.dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
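/*
 * Cancel every DMA still tracked as issued to the hardware, completing
 * each with DMA_CANCELLED. Returns the number of DMAs cancelled.
 */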
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) struct rsxx_dma *dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) int cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) /* Clean up issued DMAs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) dma = get_tracker_dma(ctrl->trackers, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) atomic_dec(&ctrl->stats.hw_q_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) push_tracker(ctrl->trackers, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
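/*
 * Tear down all DMA channels: destroy the workqueues, stop the activity
 * timer, cancel anything still queued or issued, and release the
 * trackers and the hardware command/status buffers.
 */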
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) void rsxx_dma_destroy(struct rsxx_cardinfo *card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) struct rsxx_dma_ctrl *ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) for (i = 0; i < card->n_targets; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) ctrl = &card->ctrl[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (ctrl->issue_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) destroy_workqueue(ctrl->issue_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) ctrl->issue_wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (ctrl->done_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) destroy_workqueue(ctrl->done_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) ctrl->done_wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (timer_pending(&ctrl->activity_timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) del_timer_sync(&ctrl->activity_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) /* Clean up the DMA queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) spin_lock_bh(&ctrl->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) spin_unlock_bh(&ctrl->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) rsxx_dma_cancel(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) vfree(ctrl->trackers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
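		/* Release the DMA-coherent status and command buffers. */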
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) ctrl->status.buf, ctrl->status.dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) dma_free_coherent(&card->dev->dev, COMMAND_BUFFER_SIZE8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) ctrl->cmd.buf, ctrl->cmd.dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
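/*
 * Used by the EEH (PCI error recovery) path: pull every DMA still
 * outstanding in the hardware off the trackers, undo its mapping and
 * "issued" accounting, and put it back on the channel's software queue
 * so it can be reissued later.
 */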
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) int cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) struct rsxx_dma *dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) struct list_head *issued_dmas;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) issued_dmas = kcalloc(card->n_targets, sizeof(*issued_dmas),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (!issued_dmas)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) for (i = 0; i < card->n_targets; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) INIT_LIST_HEAD(&issued_dmas[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) dma = get_tracker_dma(card->ctrl[i].trackers, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (dma == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (dma->cmd == HW_CMD_BLK_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) card->ctrl[i].stats.writes_issued--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) else if (dma->cmd == HW_CMD_BLK_DISCARD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) card->ctrl[i].stats.discards_issued--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) card->ctrl[i].stats.reads_issued--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if (dma->cmd != HW_CMD_BLK_DISCARD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) dma_unmap_page(&card->dev->dev, dma->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) get_dma_size(dma),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) dma->cmd == HW_CMD_BLK_WRITE ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) DMA_TO_DEVICE :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) list_add_tail(&dma->list, &issued_dmas[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) push_tracker(card->ctrl[i].trackers, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
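		/*
		 * Move the saved DMAs onto the software queue and fix up the
		 * queue depth accounting.
		 */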
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) spin_lock_bh(&card->ctrl[i].queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) list_splice(&issued_dmas[i], &card->ctrl[i].queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) card->ctrl[i].stats.sw_q_depth += cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) card->ctrl[i].e_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) spin_unlock_bh(&card->ctrl[i].queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) kfree(issued_dmas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
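/* Create the slab cache used to allocate struct rsxx_dma objects. */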
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) int rsxx_dma_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (!rsxx_dma_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
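/* Release the struct rsxx_dma slab cache. */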
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) void rsxx_dma_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) kmem_cache_destroy(rsxx_dma_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)