Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

static int completion_timeout = 200;
module_param(completion_timeout, int, 0644);
MODULE_PARM_DESC(completion_timeout,
		"set ioat completion timeout [msec] (default 200 [msec])");
static int idle_timeout = 2000;
module_param(idle_timeout, int, 0644);
MODULE_PARM_DESC(idle_timeout,
		"set ioat idle timeout [msec] (default 2000 [msec])");

#define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout)
#define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout)

static char *chanerr_str[] = {
	"DMA Transfer Source Address Error",
	"DMA Transfer Destination Address Error",
	"Next Descriptor Address Error",
	"Descriptor Error",
	"Chan Address Value Error",
	"CHANCMD Error",
	"Chipset Uncorrectable Data Integrity Error",
	"DMA Uncorrectable Data Integrity Error",
	"Read Data Error",
	"Write Data Error",
	"Descriptor Control Error",
	"Descriptor Transfer Size Error",
	"Completion Address Error",
	"Interrupt Configuration Error",
	"Super extended descriptor Address Error",
	"Unaffiliated Error",
	"CRC or XOR P Error",
	"XOR Q Error",
	"Descriptor Count Error",
	"DIF All F detect Error",
	"Guard Tag verification Error",
	"Application Tag verification Error",
	"Reference Tag verification Error",
	"Bundle Bit Error",
	"Result DIF All F detect Error",
	"Result Guard Tag verification Error",
	"Result Application Tag verification Error",
	"Result Reference Tag verification Error",
};

static void ioat_eh(struct ioatdma_chan *ioat_chan);

static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) {
		if ((chanerr >> i) & 1) {
			dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
				i, chanerr_str[i]);
		}
	}
}

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioatdma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_chan_by_index(instance, bit);
		if (test_bit(IOAT_RUN, &ioat_chan->state))
			tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioatdma_chan *ioat_chan = data;

	if (test_bit(IOAT_RUN, &ioat_chan->state))
		tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}

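/*
 * ioat_stop - quiesce a channel before teardown
 *
 * Clears IOAT_RUN so neither the interrupt handler nor the timer can
 * re-arm the cleanup tasklet, waits out any in-flight interrupt, timer
 * or tasklet, then runs one final cleanup pass by hand.
 */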
void ioat_stop(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	int chan_id = chan_num(ioat_chan);
	struct msix_entry *msix;

	/* 1/ stop irq from firing tasklets
	 * 2/ stop the tasklet from re-arming irqs
	 */
	clear_bit(IOAT_RUN, &ioat_chan->state);

	/* flush inflight interrupts */
	switch (ioat_dma->irq_mode) {
	case IOAT_MSIX:
		msix = &ioat_dma->msix_entries[chan_id];
		synchronize_irq(msix->vector);
		break;
	case IOAT_MSI:
	case IOAT_INTX:
		synchronize_irq(pdev->irq);
		break;
	default:
		break;
	}

	/* flush inflight timers */
	del_timer_sync(&ioat_chan->timer);

	/* flush inflight tasklet runs */
	tasklet_kill(&ioat_chan->cleanup_task);

	/* final cleanup now that everything is quiesced and can't re-arm */
	ioat_cleanup_event(&ioat_chan->cleanup_task);
}

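/*
 * Ring the doorbell: write the updated descriptor count to DMACOUNT so
 * the hardware picks up descriptors appended since the last issue.
 * Called with prep_lock held.
 */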
static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
{
	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
	ioat_chan->issued = ioat_chan->head;
	writew(ioat_chan->dmacount,
	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);
}

void ioat_issue_pending(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	if (ioat_ring_pending(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		__ioat_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
	}
}

/**
 * ioat_update_pending - log pending descriptors
 * @ioat_chan: ioat+ channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark.  Called with prep_lock held
 */
static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
		__ioat_issue_pending(ioat_chan);
}

static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat_ring_space(ioat_chan) < 1) {
		dev_err(to_dev(ioat_chan),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
	dump_desc_dbg(ioat_chan, desc);
	/* make sure descriptors are written before we submit */
	wmb();
	ioat_chan->head += 1;
	__ioat_issue_pending(ioat_chan);
}

void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		__ioat_start_null_desc(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}

static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
{
	/* set the tail to be re-issued */
	ioat_chan->issued = ioat_chan->tail;
	ioat_chan->dmacount = 0;
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);

	if (ioat_ring_pending(ioat_chan)) {
		struct ioat_ring_ent *desc;

		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
		__ioat_issue_pending(ioat_chan);
	} else
		__ioat_start_null_desc(ioat_chan);
}

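/*
 * Suspend the channel and poll CHANSTS until it leaves the active/idle
 * state.  A @tmo of 0 waits indefinitely; otherwise -ETIMEDOUT is
 * returned once @tmo jiffies have elapsed.
 */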
static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u32 status;

	status = ioat_chansts(ioat_chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat_chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (tmo && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		status = ioat_chansts(ioat_chan);
		cpu_relax();
	}

	return err;
}

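/*
 * Trigger a channel reset and busy-wait until the reset completes,
 * returning -ETIMEDOUT if it is still pending after @tmo jiffies.
 */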
static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;

	ioat_reset(ioat_chan);
	while (ioat_reset_pending(ioat_chan)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		cpu_relax();
	}

	return err;
}

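/*
 * tx_submit callback: assign the cookie, arm the completion timer on
 * first use, publish the descriptor writes to the device with wmb(),
 * advance head by the slots reserved in ioat_check_space_lock(), and
 * release the prep_lock taken there.
 */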
static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
	__releases(&ioat_chan->prep_lock)
{
	struct dma_chan *c = tx->chan;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	/* make descriptor updates visible before advancing ioat->head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();

	ioat_chan->head += ioat_chan->produce;

	ioat_update_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);

	return cookie;
}

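/*
 * Allocate one software ring entry and point it at its slice of the
 * coherent descriptor memory backing ring slot @idx.
 */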
static struct ioat_ring_ent *
ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
	int chunk;
	dma_addr_t phys;
	u8 *pos;
	off_t offs;

	chunk = idx / IOAT_DESCS_PER_CHUNK;
	idx &= (IOAT_DESCS_PER_CHUNK - 1);
	offs = idx * IOAT_DESC_SZ;
	pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
	phys = ioat_chan->descs[chunk].hw + offs;
	hw = (struct ioat_dma_descriptor *)pos;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_zalloc(ioat_cache, flags);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	kmem_cache_free(ioat_cache, desc);
}

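/*
 * Allocate the full descriptor ring: the software ring array, the
 * coherent chunks holding the hardware descriptors, and the circular
 * next-descriptor chain.  Descriptor pre-fetching is programmed when
 * the device advertises IOAT_CAP_DPS.
 */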
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent **ring;
	int total_descs = 1 << order;
	int i, chunks;

	/* allocate the array to hold the software ring */
	ring = kcalloc(total_descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;

	chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE;
	ioat_chan->desc_chunks = chunks;

	for (i = 0; i < chunks; i++) {
		struct ioat_descs *descs = &ioat_chan->descs[i];

		descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
					IOAT_CHUNK_SIZE, &descs->hw, flags);
		if (!descs->virt) {
			int idx;

			for (idx = 0; idx < i; idx++) {
				descs = &ioat_chan->descs[idx];
				dma_free_coherent(to_dev(ioat_chan),
						IOAT_CHUNK_SIZE,
						descs->virt, descs->hw);
				descs->virt = NULL;
				descs->hw = 0;
			}

			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
	}

	for (i = 0; i < total_descs; i++) {
		ring[i] = ioat_alloc_ring_ent(c, i, flags);
		if (!ring[i]) {
			int idx;

			while (i--)
				ioat_free_ring_ent(ring[i], c);

			for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
				dma_free_coherent(to_dev(ioat_chan),
						  IOAT_CHUNK_SIZE,
						  ioat_chan->descs[idx].virt,
						  ioat_chan->descs[idx].hw);
				ioat_chan->descs[idx].virt = NULL;
				ioat_chan->descs[idx].hw = 0;
			}

			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < total_descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	/* setup descriptor pre-fetching for v3.4 */
	if (ioat_dma->cap & IOAT_CAP_DPS) {
		u16 drsctl = IOAT_CHAN_DRSZ_2MB | IOAT_CHAN_DRS_EN;

		if (chunks == 1)
			drsctl |= IOAT_CHAN_DRS_AUTOWRAP;

		writew(drsctl, ioat_chan->reg_base + IOAT_CHAN_DRSCTL_OFFSET);

	}

	return ring;
}

/**
 * ioat_check_space_lock - verify space and grab ring producer lock
 * @ioat_chan: ioat,3 channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
	__acquires(&ioat_chan->prep_lock)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
		dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat_chan->head,
			ioat_chan->tail, ioat_chan->issued);
		ioat_chan->produce = num_descs;
		return 0;  /* with ioat->prep_lock held */
	}
	spin_unlock_bh(&ioat_chan->prep_lock);

	dev_dbg_ratelimited(to_dev(ioat_chan),
			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
			    __func__, num_descs, ioat_chan->head,
			    ioat_chan->tail, ioat_chan->issued);

	/* progress reclaim: in the allocation failure case we may be
	 * called with bottom halves disabled, so we need to trigger the
	 * timer event directly
	 */
	if (time_is_before_jiffies(ioat_chan->timer.expires)
	    && timer_pending(&ioat_chan->timer)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		ioat_timer_event(&ioat_chan->timer);
	}

	return -ENOMEM;
}

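/*
 * XOR operations with more than 5 sources and PQ operations with more
 * than 3 sources spill into a second (extended) descriptor slot, which
 * cleanup has to step over.
 */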
static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}

static void
ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
{
	if (!sed)
		return;

	dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
	kmem_cache_free(ioat_sed_cache, sed);
}

static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;
	u64 completion;

	completion = *ioat_chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	return phys_complete;
}

static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
				   u64 *phys_complete)
{
	*phys_complete = ioat_get_current_completion(ioat_chan);
	if (*phys_complete == ioat_chan->last_completion)
		return false;

	clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

static void
desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	switch (hw->ctl_f.op) {
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
	{
		struct ioat_pq_descriptor *pq = desc->pq;

		/* check if there's error written */
		if (!pq->dwbes_f.wbes)
			return;

		/* need to set a chanerr var for checking to clear later */

		if (pq->dwbes_f.p_val_err)
			*desc->result |= SUM_CHECK_P_RESULT;

		if (pq->dwbes_f.q_val_err)
			*desc->result |= SUM_CHECK_Q_RESULT;

		return;
	}
	default:
		return;
	}
}

/**
 * __cleanup - reclaim used descriptors
 * @ioat_chan: channel (ring) to clean
 * @phys_complete: zeroed (or not) completion address (from status)
 */
static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	int idx = ioat_chan->tail, i;
	u16 active;

	dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);

	/*
	 * At restart of the channel, the completion address and the
	 * channel status will be 0 due to starting a new chain. Since
	 * it's new chain and the first descriptor "fails", there is
	 * nothing to clean up. We do not want to reap the entire submitted
	 * chain due to this 0 address value and then BUG.
	 */
	if (!phys_complete)
		return;

	active = ioat_ring_active(ioat_chan);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		dump_desc_dbg(ioat_chan, desc);

		/* set err stat if we are using dwbes */
		if (ioat_dma->cap & IOAT_CAP_DWBES)
			desc_get_errstat(ioat_chan, desc);

		tx = &desc->txd;
		if (tx->cookie) {
			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			dmaengine_desc_get_callback_invoke(tx, NULL);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	/* finish all descriptor reads before incrementing tail */
	smp_mb();
	ioat_chan->tail = idx + i;
	/* no active descs have written a completion? */
	BUG_ON(active && !seen_current);
	ioat_chan->last_completion = phys_complete;

	if (active - i == 0) {
		dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
			__func__);
		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
	}

	/* microsecond delay by sysfs variable per pending descriptor */
	if (ioat_chan->intr_coalesce != ioat_chan->prev_intr_coalesce) {
		writew(min((ioat_chan->intr_coalesce * (active - i)),
		       IOAT_INTRDELAY_MASK),
		       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
		ioat_chan->prev_intr_coalesce = ioat_chan->intr_coalesce;
	}
}

static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	spin_lock_bh(&ioat_chan->cleanup_lock);

	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	if (is_ioat_halted(*ioat_chan->completion)) {
		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

		if (chanerr &
		    (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
			ioat_eh(ioat_chan);
		}
	}

	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

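/*
 * Tasklet body: reap completed descriptors and, if the channel is still
 * running, re-arm it by rewriting CHANCTRL.
 */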
void ioat_cleanup_event(struct tasklet_struct *t)
{
	struct ioatdma_chan *ioat_chan = from_tasklet(ioat_chan, t, cleanup_task);

	ioat_cleanup(ioat_chan);
	if (!test_bit(IOAT_RUN, &ioat_chan->state))
		return;
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
}

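/*
 * Restart a halted channel: reprogram the completion address, quiesce,
 * reap whatever did complete, then resume the chain from the ring tail.
 */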
static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	/* set the completion address register again */
	writel(lower_32_bits(ioat_chan->completion_dma),
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(upper_32_bits(ioat_chan->completion_dma),
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	ioat_quiesce(ioat_chan, 0);
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	__ioat_restart_chan(ioat_chan);
}


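/*
 * Complete every descriptor still outstanding behind the failed one
 * with DMA_TRANS_ABORTED and advance the ring tail past them.
 */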
static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	u16 active;
	int idx = ioat_chan->tail, i;

	/*
	 * We assume that the failed descriptor has been processed.
	 * Now we are just returning all the remaining submitted
	 * descriptors to abort.
	 */
	active = ioat_ring_active(ioat_chan);

	/* we skip the failed descriptor that tail points to */
	for (i = 1; i < active; i++) {
		struct dma_async_tx_descriptor *tx;

		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);

		tx = &desc->txd;
		if (tx->cookie) {
			struct dmaengine_result res;

			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			res.result = DMA_TRANS_ABORTED;
			dmaengine_desc_get_callback_invoke(tx, &res);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			WARN_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat_chan->tail = idx + active;

	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
	ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
}

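/*
 * Channel error handler: with tail pointing at the faulting descriptor,
 * decode CHANERR, fold P/Q validation failures into the descriptor's
 * result bitmap, and deal with recoverable transfer errors.
 */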
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) static void ioat_eh(struct ioatdma_chan *ioat_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	struct pci_dev *pdev = to_pdev(ioat_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	struct ioat_dma_descriptor *hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	struct dma_async_tx_descriptor *tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	u64 phys_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	struct ioat_ring_ent *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	u32 err_handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	u32 chanerr_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	u32 chanerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	bool abort = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	struct dmaengine_result res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	/* cleanup so tail points to descriptor that caused the error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		__cleanup(ioat_chan, phys_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		__func__, chanerr, chanerr_int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	hw = desc->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	dump_desc_dbg(ioat_chan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
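	/*
	 * Validation operations report P/Q check mismatches through the
	 * descriptor's result field rather than failing the transfer, so
	 * treat those error bits as handled.
	 */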
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	switch (hw->ctl_f.op) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	case IOAT_OP_XOR_VAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 			*desc->result |= SUM_CHECK_P_RESULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	case IOAT_OP_PQ_VAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	case IOAT_OP_PQ_VAL_16S:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 			*desc->result |= SUM_CHECK_P_RESULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 			*desc->result |= SUM_CHECK_Q_RESULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	if (chanerr & IOAT_CHANERR_RECOVER_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		if (chanerr & IOAT_CHANERR_READ_DATA_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 			res.result = DMA_TRANS_READ_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 			err_handled |= IOAT_CHANERR_READ_DATA_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		} else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 			res.result = DMA_TRANS_WRITE_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 			err_handled |= IOAT_CHANERR_WRITE_DATA_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		abort = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		res.result = DMA_TRANS_NOERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	/* fault on unhandled error or spurious halt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	if (chanerr ^ err_handled || chanerr == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 			__func__, chanerr, err_handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		dev_err(to_dev(ioat_chan), "Errors handled:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		ioat_print_chanerrs(ioat_chan, err_handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		dev_err(to_dev(ioat_chan), "Errors not handled:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	/* cleanup the faulty descriptor since we are continuing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	tx = &desc->txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	if (tx->cookie) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		dma_cookie_complete(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		dma_descriptor_unmap(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		dmaengine_desc_get_callback_invoke(tx, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		tx->callback = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		tx->callback_result = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	/* mark faulting descriptor as complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	*ioat_chan->completion = desc->txd.phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	spin_lock_bh(&ioat_chan->prep_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	/* we need to abort all descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	if (abort) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		ioat_abort_descs(ioat_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		/* clean up the channel, we could be in a weird state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		ioat_reset_hw(ioat_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	ioat_restart_channel(ioat_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	spin_unlock_bh(&ioat_chan->prep_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
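/*
 * Re-arm the completion timeout while descriptors are still in flight;
 * once the ring drains, clear the ACTIVE state and arm one final, longer
 * idle timeout.
 */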
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) static void check_active(struct ioatdma_chan *ioat_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	if (ioat_ring_active(ioat_chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
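/*
 * Take the channel offline, abort whatever is left on the ring, then reset
 * and restart the hardware before marking the channel usable again.
 */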
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	spin_lock_bh(&ioat_chan->prep_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	spin_unlock_bh(&ioat_chan->prep_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	ioat_abort_descs(ioat_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	dev_warn(to_dev(ioat_chan), "Reset channel...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	ioat_reset_hw(ioat_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	dev_warn(to_dev(ioat_chan), "Restart channel...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	ioat_restart_channel(ioat_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	spin_lock_bh(&ioat_chan->prep_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	spin_unlock_bh(&ioat_chan->prep_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) void ioat_timer_event(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	dma_addr_t phys_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	u64 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	status = ioat_chansts(ioat_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	/* when halted due to errors, check for channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	 * programming errors before advancing the completion state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	if (is_ioat_halted(status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		u32 chanerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 			__func__, chanerr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		dev_err(to_dev(ioat_chan), "Errors:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		ioat_print_chanerrs(ioat_chan, chanerr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		if (test_bit(IOAT_RUN, &ioat_chan->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 			spin_lock_bh(&ioat_chan->cleanup_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 			ioat_reboot_chan(ioat_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 			spin_unlock_bh(&ioat_chan->cleanup_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	spin_lock_bh(&ioat_chan->cleanup_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	/* handle the no-actives case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	if (!ioat_ring_active(ioat_chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		spin_lock_bh(&ioat_chan->prep_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		check_active(ioat_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		spin_unlock_bh(&ioat_chan->prep_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	/* handle the missed cleanup case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		/* timer restarted in ioat_cleanup_preamble
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		 * and IOAT_COMPLETION_ACK cleared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		__cleanup(ioat_chan, phys_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	/* if we haven't made progress and we have already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	 * acknowledged a pending completion once, then be more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	 * forceful with a restart
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		u32 chanerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 			status, chanerr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		dev_err(to_dev(ioat_chan), "Errors:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		ioat_print_chanerrs(ioat_chan, chanerr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 			ioat_ring_active(ioat_chan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		ioat_reboot_chan(ioat_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	/* handle missed issue pending case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	if (ioat_ring_pending(ioat_chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		dev_warn(to_dev(ioat_chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 			"Completion timeout with pending descriptors\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		spin_lock_bh(&ioat_chan->prep_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		__ioat_issue_pending(ioat_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		spin_unlock_bh(&ioat_chan->prep_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
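	/*
	 * No forward progress yet: note that this completion has been seen
	 * once so the next timeout takes the forceful restart path above.
	 */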
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) unlock_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	spin_unlock_bh(&ioat_chan->cleanup_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) enum dma_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		struct dma_tx_state *txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	enum dma_status ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	ret = dma_cookie_status(c, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	if (ret == DMA_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
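	/* not complete yet: reap finished descriptors and re-check the cookie */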
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	ioat_cleanup(ioat_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	return dma_cookie_status(c, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	/* throw away whatever the channel was doing and get it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	 * initialized, with ioat3-specific workarounds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	struct pci_dev *pdev = ioat_dma->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	u32 chanerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	u16 dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	ioat_quiesce(ioat_chan, msecs_to_jiffies(100));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	if (ioat_dma->version < IOAT_VER_3_3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		/* clear any pending errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		err = pci_read_config_dword(pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 			dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 				"channel error register unreachable\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		pci_write_config_dword(pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		 * (workaround for spurious config parity error after restart)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 			pci_write_config_dword(pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 					       0x10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 
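	/*
	 * On BWD parts running in MSI-X mode, save the MSI-X table/PBA
	 * registers before the reset so they can be restored once it
	 * completes below.
	 */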
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 			writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 			writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 			writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		dev_err(&pdev->dev, "Failed to reset: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }