// SPDX-License-Identifier: GPL-2.0-only
/*
 * IBM Accelerator Family 'GenWQE'
 *
 * (C) Copyright IBM Corp. 2013
 *
 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
 * Author: Michael Jung <mijung@gmx.net>
 * Author: Michael Ruettger <michael@ibmra.de>
 */

/*
 * Device Driver Control Block (DDCB) queue support. Definition of
 * interrupt handlers for queue support as well as triggering the
 * health monitor code in case of problems. The current hardware uses
 * an MSI interrupt which is shared between error handling and
 * functional code.
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/crc-itu-t.h>

#include "card_base.h"
#include "card_ddcb.h"

/*
 * N: next DDCB, this is where the next DDCB will be put.
 * A: active DDCB, this is where the code will look for the next completion.
 * x: DDCB is enqueued, we are waiting for its completion.
 *
 * Situation (1): Empty queue
 *    +---+---+---+---+---+---+---+---+
 *    | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
 *    |   |   |   |   |   |   |   |   |
 *    +---+---+---+---+---+---+---+---+
 *            A/N
 *      enqueued_ddcbs = N - A = 2 - 2 = 0
 *
 * Situation (2): Queue not wrapped, N > A
 *    +---+---+---+---+---+---+---+---+
 *    | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
 *    |   |   | x | x |   |   |   |   |
 *    +---+---+---+---+---+---+---+---+
 *            A       N
 *      enqueued_ddcbs = N - A = 4 - 2 = 2
 *
 * Situation (3): Queue wrapped, A > N
 *    +---+---+---+---+---+---+---+---+
 *    | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
 *    | x | x |   |   | x | x | x | x |
 *    +---+---+---+---+---+---+---+---+
 *            N       A
 *      enqueued_ddcbs = queue_max - (A - N) = 8 - (4 - 2) = 6
 *
 * Situation (4a): Queue full, N > A
 *    +---+---+---+---+---+---+---+---+
 *    | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
 *    | x | x | x | x | x | x | x |   |
 *    +---+---+---+---+---+---+---+---+
 *      A                           N
 *
 *      enqueued_ddcbs = N - A = 7 - 0 = 7
 *
 * Situation (4b): Queue full, A > N
 *    +---+---+---+---+---+---+---+---+
 *    | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
 *    | x | x | x |   | x | x | x | x |
 *    +---+---+---+---+---+---+---+---+
 *                  N   A
 *      enqueued_ddcbs = queue_max - (A - N) = 8 - (4 - 3) = 7
 */

static int queue_empty(struct ddcb_queue *queue)
{
	return queue->ddcb_next == queue->ddcb_act;
}

static int queue_enqueued_ddcbs(struct ddcb_queue *queue)
{
	if (queue->ddcb_next >= queue->ddcb_act)
		return queue->ddcb_next - queue->ddcb_act;

	return queue->ddcb_max - (queue->ddcb_act - queue->ddcb_next);
}

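/*
 * A minimal worked example for the bookkeeping above (assuming
 * ddcb_max = 8, as in the diagrams): one slot is always kept free so
 * that a full queue (situations 4a/4b) can be told apart from an
 * empty one (next == act), hence the "- 1" below. With 7 DDCBs
 * enqueued:
 *
 *   free_ddcbs = ddcb_max - enqueued_ddcbs - 1 = 8 - 7 - 1 = 0
 */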
static int queue_free_ddcbs(struct ddcb_queue *queue)
{
	int free_ddcbs = queue->ddcb_max - queue_enqueued_ddcbs(queue) - 1;

	if (WARN_ON_ONCE(free_ddcbs < 0)) { /* must never ever happen! */
		return 0;
	}
	return free_ddcbs;
}

/*
 * Use of the PRIV field in the DDCB for queue debugging:
 *
 * (1) Trying to get rid of a DDCB which saw a timeout:
 *     pddcb->priv[6] = 0xcc;	# cleared
 *
 * (2) Append a DDCB via NEXT bit:
 *     pddcb->priv[7] = 0xaa;	# appended
 *
 * (3) DDCB needed tapping:
 *     pddcb->priv[7] = 0xbb;	# tapped
 *
 * (4) DDCB marked as correctly finished:
 *     pddcb->priv[6] = 0xff;	# finished
 */

static inline void ddcb_mark_tapped(struct ddcb *pddcb)
{
	pddcb->priv[7] = 0xbb;	/* tapped */
}

static inline void ddcb_mark_appended(struct ddcb *pddcb)
{
	pddcb->priv[7] = 0xaa;	/* appended */
}

static inline void ddcb_mark_cleared(struct ddcb *pddcb)
{
	pddcb->priv[6] = 0xcc;	/* cleared */
}

static inline void ddcb_mark_finished(struct ddcb *pddcb)
{
	pddcb->priv[6] = 0xff;	/* finished */
}

static inline void ddcb_mark_unused(struct ddcb *pddcb)
{
	pddcb->priv_64 = cpu_to_be64(0); /* not tapped */
}

/**
 * genwqe_crc16() - Generate 16-bit crc as required for DDCBs
 * @buff: pointer to data buffer
 * @len: length of data for calculation
 * @init: initial crc (0xffff at start)
 *
 * Polynomial = x^16 + x^12 + x^5 + 1 (0x1021)
 * Example: 4 bytes 0x01 0x02 0x03 0x04 with init = 0xffff
 * should result in a crc16 of 0x89c3
 *
 * Return: crc16 checksum in big endian format!
 */
static inline u16 genwqe_crc16(const u8 *buff, size_t len, u16 init)
{
	return crc_itu_t(init, buff, len);
}
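
/*
 * A minimal usage sketch (values hypothetical): the VCRC check in
 * genwqe_check_ddcb_queue() below recomputes the CRC over the
 * returned ASV area and compares it against the card-provided value:
 *
 *   u16 vcrc = genwqe_crc16(pddcb->asv,
 *                           VCRC_LENGTH(req->cmd.asv_length), 0xffff);
 *   if (vcrc != be16_to_cpu(pddcb->vcrc_16))
 *           ...complain about a wrong VCRC...
 */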

static void print_ddcb_info(struct genwqe_dev *cd, struct ddcb_queue *queue)
{
	int i;
	struct ddcb *pddcb;
	unsigned long flags;
	struct pci_dev *pci_dev = cd->pci_dev;

	spin_lock_irqsave(&cd->print_lock, flags);

	dev_info(&pci_dev->dev,
		 "DDCB list for card #%d (ddcb_act=%d / ddcb_next=%d):\n",
		 cd->card_idx, queue->ddcb_act, queue->ddcb_next);

	pddcb = queue->ddcb_vaddr;
	for (i = 0; i < queue->ddcb_max; i++) {
		dev_err(&pci_dev->dev,
			"  %c %-3d: RETC=%03x SEQ=%04x HSI=%02X SHI=%02x PRIV=%06llx CMD=%03x\n",
			i == queue->ddcb_act ? '>' : ' ',
			i,
			be16_to_cpu(pddcb->retc_16),
			be16_to_cpu(pddcb->seqnum_16),
			pddcb->hsi,
			pddcb->shi,
			be64_to_cpu(pddcb->priv_64),
			pddcb->cmd);
		pddcb++;
	}
	spin_unlock_irqrestore(&cd->print_lock, flags);
}

struct genwqe_ddcb_cmd *ddcb_requ_alloc(void)
{
	struct ddcb_requ *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	return &req->cmd;
}

void ddcb_requ_free(struct genwqe_ddcb_cmd *cmd)
{
	struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);

	kfree(req);
}
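
/*
 * A hypothetical caller sketch: ddcb_requ_alloc() hands out only the
 * embedded genwqe_ddcb_cmd, and ddcb_requ_free() recovers the
 * surrounding ddcb_requ again via container_of():
 *
 *   struct genwqe_ddcb_cmd *cmd = ddcb_requ_alloc();
 *
 *   if (cmd) {
 *           ...fill in and execute cmd...
 *           ddcb_requ_free(cmd);
 *   }
 */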

static inline enum genwqe_requ_state ddcb_requ_get_state(struct ddcb_requ *req)
{
	return req->req_state;
}

static inline void ddcb_requ_set_state(struct ddcb_requ *req,
				       enum genwqe_requ_state new_state)
{
	req->req_state = new_state;
}

static inline int ddcb_requ_collect_debug_data(struct ddcb_requ *req)
{
	return req->cmd.ddata_addr != 0x0;
}

/**
 * ddcb_requ_finished() - Check whether the associated DDCB has finished
 * @cd: pointer to genwqe device descriptor
 * @req: DDCB work request
 *
 * The status of the ddcb_requ mirrors the hardware state, but is
 * copied into the ddcb_requ by the interrupt/polling code. The
 * low-level code should check the hardware state directly, the
 * higher level code should check the copy.
 *
 * This function will also return true if the state of the queue is
 * not GENWQE_CARD_USED. This enables us to purge all DDCBs in the
 * shutdown case.
 */
static int ddcb_requ_finished(struct genwqe_dev *cd, struct ddcb_requ *req)
{
	return (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED) ||
		(cd->card_state != GENWQE_CARD_USED);
}

#define RET_DDCB_APPENDED 1
#define RET_DDCB_TAPPED 2
/**
 * enqueue_ddcb() - Enqueue a DDCB
 * @cd: pointer to genwqe device descriptor
 * @queue: queue this operation should be done on
 * @pddcb: pointer to ddcb structure
 * @ddcb_no: number of the DDCB being tapped
 *
 * Start execution of a DDCB by tapping it, or append it to the queue
 * via the NEXT bit. This is done with an atomic 'compare and swap'
 * instruction and by checking SHI and HSI of the previous DDCB.
 *
 * This function must only be called with ddcb_lock held.
 *
 * Return: 1 if new DDCB is appended to previous
 *         2 if DDCB queue is tapped via register/simulation
 */
static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
			struct ddcb *pddcb, int ddcb_no)
{
	unsigned int try;
	int prev_no;
	struct ddcb *prev_ddcb;
	__be32 old, new, icrc_hsi_shi;
	u64 num;

	/*
	 * For performance checks a Dispatch Timestamp can be put into
	 * the DDCB. It is supposed to use the SLU's free running
	 * counter, but this requires PCIe cycles.
	 */
	ddcb_mark_unused(pddcb);

	/* check previous DDCB if already fetched */
	prev_no = (ddcb_no == 0) ? queue->ddcb_max - 1 : ddcb_no - 1;
	prev_ddcb = &queue->ddcb_vaddr[prev_no];

	/*
	 * It might have happened that the HSI.FETCHED bit is
	 * set. Retry in this case; at most two attempts are expected.
	 */
	ddcb_mark_appended(pddcb);
	for (try = 0; try < 2; try++) {
		old = prev_ddcb->icrc_hsi_shi_32; /* read SHI/HSI in BE32 */

		/* try to append via NEXT bit if prev DDCB is not completed */
		if ((old & DDCB_COMPLETED_BE32) != 0x00000000)
			break;

		new = (old | DDCB_NEXT_BE32);

		wmb();		/* need to ensure write ordering */
		icrc_hsi_shi = cmpxchg(&prev_ddcb->icrc_hsi_shi_32, old, new);

		if (icrc_hsi_shi == old)
			return RET_DDCB_APPENDED; /* appended to queue */
	}

	/* Queue must be re-started by updating QUEUE_OFFSET */
	ddcb_mark_tapped(pddcb);
	num = (u64)ddcb_no << 8;

	wmb();			/* need to ensure write ordering */
	__genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num); /* start queue */

	return RET_DDCB_TAPPED;
}
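
/*
 * Worked example (hypothetical value): tapping the queue to restart
 * fetching at DDCB #3 writes num = 3 << 8 = 0x300 into the
 * IO_QUEUE_OFFSET register above.
 */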

/**
 * copy_ddcb_results() - Copy output state from real DDCB to request
 * @req: pointer to requested DDCB parameters
 * @ddcb_no: number of the DDCB being processed
 *
 * Copy DDCB ASV to request struct. There is no endian
 * conversion made, since data structure in ASV is still
 * unknown here.
 *
 * This is needed by:
 *   - genwqe_purge_ddcb()
 *   - genwqe_check_ddcb_queue()
 */
static void copy_ddcb_results(struct ddcb_requ *req, int ddcb_no)
{
	struct ddcb_queue *queue = req->queue;
	struct ddcb *pddcb = &queue->ddcb_vaddr[req->num];

	memcpy(&req->cmd.asv[0], &pddcb->asv[0], DDCB_ASV_LENGTH);

	/* copy status flags of the variant part */
	req->cmd.vcrc = be16_to_cpu(pddcb->vcrc_16);
	req->cmd.deque_ts = be64_to_cpu(pddcb->deque_ts_64);
	req->cmd.cmplt_ts = be64_to_cpu(pddcb->cmplt_ts_64);

	req->cmd.attn = be16_to_cpu(pddcb->attn_16);
	req->cmd.progress = be32_to_cpu(pddcb->progress_32);
	req->cmd.retc = be16_to_cpu(pddcb->retc_16);

	if (ddcb_requ_collect_debug_data(req)) {
		int prev_no = (ddcb_no == 0) ?
			queue->ddcb_max - 1 : ddcb_no - 1;
		struct ddcb *prev_pddcb = &queue->ddcb_vaddr[prev_no];

		memcpy(&req->debug_data.ddcb_finished, pddcb,
		       sizeof(req->debug_data.ddcb_finished));
		memcpy(&req->debug_data.ddcb_prev, prev_pddcb,
		       sizeof(req->debug_data.ddcb_prev));
	}
}

/**
 * genwqe_check_ddcb_queue() - Checks DDCB queue for completed work requests.
 * @cd: pointer to genwqe device descriptor
 * @queue: queue to be checked
 *
 * Return: Number of DDCBs which were finished
 */
static int genwqe_check_ddcb_queue(struct genwqe_dev *cd,
				   struct ddcb_queue *queue)
{
	unsigned long flags;
	int ddcbs_finished = 0;
	struct pci_dev *pci_dev = cd->pci_dev;

	spin_lock_irqsave(&queue->ddcb_lock, flags);

	/* FIXME avoid soft locking CPU */
	while (!queue_empty(queue) && (ddcbs_finished < queue->ddcb_max)) {

		struct ddcb *pddcb;
		struct ddcb_requ *req;
		u16 vcrc, vcrc_16, retc_16;

		pddcb = &queue->ddcb_vaddr[queue->ddcb_act];

		if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) ==
		    0x00000000)
			goto go_home; /* not completed, continue waiting */

		wmb();  /* Add sync to decouple prev. read operations */

		/* Note: DDCB could be purged */
		req = queue->ddcb_req[queue->ddcb_act];
		if (req == NULL) {
			/* this occurs if DDCB is purged, not an error */
			/* Move active DDCB further; Nothing to do anymore. */
			goto pick_next_one;
		}

		/*
		 * HSI=0x44 (fetched and completed), but RETC is
		 * 0x101, or even worse 0x000.
		 *
		 * In case we see the queue in an inconsistent state
		 * we read the errcnts and the queue status to provide
		 * a trigger for our PCIe analyzer to stop capturing.
		 */
		retc_16 = be16_to_cpu(pddcb->retc_16);
		if ((pddcb->hsi == 0x44) && (retc_16 <= 0x101)) {
			u64 errcnts, status;
			u64 ddcb_offs = (u64)pddcb - (u64)queue->ddcb_vaddr;

			errcnts = __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS);
			status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS);

			dev_err(&pci_dev->dev,
				"[%s] SEQN=%04x HSI=%02x RETC=%03x Q_ERRCNTS=%016llx Q_STATUS=%016llx DDCB_DMA_ADDR=%016llx\n",
				__func__, be16_to_cpu(pddcb->seqnum_16),
				pddcb->hsi, retc_16, errcnts, status,
				queue->ddcb_daddr + ddcb_offs);
		}

		copy_ddcb_results(req, queue->ddcb_act);
		queue->ddcb_req[queue->ddcb_act] = NULL; /* take from queue */

		dev_dbg(&pci_dev->dev, "FINISHED DDCB#%d\n", req->num);
		genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));

		ddcb_mark_finished(pddcb);

		/* calculate CRC_16 to see if VCRC is correct */
		vcrc = genwqe_crc16(pddcb->asv,
				    VCRC_LENGTH(req->cmd.asv_length),
				    0xffff);
		vcrc_16 = be16_to_cpu(pddcb->vcrc_16);
		if (vcrc != vcrc_16) {
			printk_ratelimited(KERN_ERR
				"%s %s: err: wrong VCRC pre=%02x vcrc_len=%d bytes vcrc_data=%04x is not vcrc_card=%04x\n",
				GENWQE_DEVNAME, dev_name(&pci_dev->dev),
				pddcb->pre, VCRC_LENGTH(req->cmd.asv_length),
				vcrc, vcrc_16);
		}

		ddcb_requ_set_state(req, GENWQE_REQU_FINISHED);
		queue->ddcbs_completed++;
		queue->ddcbs_in_flight--;

		/*
		 * Wake up the process waiting for this DDCB, and the
		 * processes on the busy queue.
		 */
		wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]);
		wake_up_interruptible(&queue->busy_waitq);

pick_next_one:
		queue->ddcb_act = (queue->ddcb_act + 1) % queue->ddcb_max;
		ddcbs_finished++;
	}

go_home:
	spin_unlock_irqrestore(&queue->ddcb_lock, flags);
	return ddcbs_finished;
}

/**
 * __genwqe_wait_ddcb() - Waits until DDCB is completed
 * @cd: pointer to genwqe device descriptor
 * @req: pointer to requested DDCB parameters
 *
 * The Service Layer will update the RETC in DDCB when processing is
 * pending or done.
 *
 * Return: > 0 remaining jiffies, DDCB completed
 *         -ETIMEDOUT when timeout
 *         -ERESTARTSYS when ^C
 *         -EINVAL when unknown error condition
 *
 * When an error is returned the caller needs to ensure that
 * purge_ddcb() is being called to get the &req removed from the
 * queue.
 */
int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
{
	int rc;
	unsigned int ddcb_no;
	struct ddcb_queue *queue;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (req == NULL)
		return -EINVAL;

	queue = req->queue;
	if (queue == NULL)
		return -EINVAL;

	ddcb_no = req->num;
	if (ddcb_no >= queue->ddcb_max)
		return -EINVAL;

	rc = wait_event_interruptible_timeout(queue->ddcb_waitqs[ddcb_no],
				ddcb_requ_finished(cd, req),
				GENWQE_DDCB_SOFTWARE_TIMEOUT * HZ);

	/*
	 * We need to distinguish 3 cases here:
	 *   1. rc == 0              timeout occurred
	 *   2. rc == -ERESTARTSYS   signal received
	 *   3. rc > 0               remaining jiffies condition is true
	 */
	if (rc == 0) {
		struct ddcb_queue *queue = req->queue;
		struct ddcb *pddcb;

		/*
		 * The timeout may be caused by long task switching
		 * time. When the timeout happens, check if the
		 * request has meanwhile completed.
		 */
		genwqe_check_ddcb_queue(cd, req->queue);
		if (ddcb_requ_finished(cd, req))
			return rc;

		dev_err(&pci_dev->dev,
			"[%s] err: DDCB#%d timeout rc=%d state=%d req @ %p\n",
			__func__, req->num, rc, ddcb_requ_get_state(req),
			req);
		dev_err(&pci_dev->dev,
			"[%s] IO_QUEUE_STATUS=0x%016llx\n", __func__,
			__genwqe_readq(cd, queue->IO_QUEUE_STATUS));

		pddcb = &queue->ddcb_vaddr[req->num];
		genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));

		print_ddcb_info(cd, req->queue);
		return -ETIMEDOUT;

	} else if (rc == -ERESTARTSYS) {
		return rc;
		/*
		 * EINTR:       Stops the application
		 * ERESTARTSYS: Restartable system call; called again
		 */

	} else if (rc < 0) {
		dev_err(&pci_dev->dev,
			"[%s] err: DDCB#%d unknown result (rc=%d) %d!\n",
			__func__, req->num, rc, ddcb_requ_get_state(req));
		return -EINVAL;
	}

	/* Severe error occurred. Driver is forced to stop operation. */
	if (cd->card_state != GENWQE_CARD_USED) {
		dev_err(&pci_dev->dev,
			"[%s] err: DDCB#%d forced to stop (rc=%d)\n",
			__func__, req->num, rc);
		return -EIO;
	}
	return rc;
}
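
/*
 * A hypothetical caller sketch: as the kernel-doc above notes, any
 * error return requires the request to be purged so that it is
 * really removed from ddcb_req[] again:
 *
 *   rc = __genwqe_wait_ddcb(cd, req);
 *   if (rc < 0)
 *           __genwqe_purge_ddcb(cd, req);
 */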

/**
 * get_next_ddcb() - Get next available DDCB
 * @cd: pointer to genwqe device descriptor
 * @queue: DDCB queue
 * @num: internal DDCB number
 *
 * The DDCB's content is completely cleared, except for the PRE and
 * SEQNUM presets. This function must only be called when ddcb_lock
 * is held.
 *
 * Return: NULL if no empty DDCB available otherwise ptr to next DDCB.
 */
static struct ddcb *get_next_ddcb(struct genwqe_dev *cd,
				  struct ddcb_queue *queue,
				  int *num)
{
	u64 *pu64;
	struct ddcb *pddcb;

	if (queue_free_ddcbs(queue) == 0) /* queue is full */
		return NULL;

	/* find new ddcb */
	pddcb = &queue->ddcb_vaddr[queue->ddcb_next];

	/* if it is not completed, we are not allowed to use it */
	/* barrier(); */
	if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) == 0x00000000)
		return NULL;

	*num = queue->ddcb_next;	/* internal DDCB number */
	queue->ddcb_next = (queue->ddcb_next + 1) % queue->ddcb_max;

	/* clear important DDCB fields */
	pu64 = (u64 *)pddcb;
	pu64[0] = 0ULL;		/* offs 0x00 (ICRC,HSI,SHI,...) */
	pu64[1] = 0ULL;		/* offs 0x01 (ACFUNC,CMD...) */

	/* destroy previous results in ASV */
	pu64[0x80/8] = 0ULL;	/* offs 0x80 (ASV + 0) */
	pu64[0x88/8] = 0ULL;	/* offs 0x88 (ASV + 0x08) */
	pu64[0x90/8] = 0ULL;	/* offs 0x90 (ASV + 0x10) */
	pu64[0x98/8] = 0ULL;	/* offs 0x98 (ASV + 0x18) */
	pu64[0xd0/8] = 0ULL;	/* offs 0xd0 (RETC,ATTN...) */

	pddcb->pre = DDCB_PRESET_PRE; /* 128 */
	pddcb->seqnum_16 = cpu_to_be16(queue->ddcb_seq++);
	return pddcb;
}

/**
 * __genwqe_purge_ddcb() - Remove a DDCB from the workqueue
 * @cd: genwqe device descriptor
 * @req: DDCB request
 *
 * This will fail when the request was already FETCHED. In this case
 * we need to wait until it is finished. Else the DDCB can be
 * reused. This function also ensures that the request data structure
 * is removed from ddcb_req[].
 *
 * Do not forget to call this function when genwqe_wait_ddcb() fails,
 * so that the request really gets removed from ddcb_req[].
 *
 * Return: 0 success
 */
int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
{
	struct ddcb *pddcb = NULL;
	unsigned int t;
	unsigned long flags;
	struct ddcb_queue *queue = req->queue;
	struct pci_dev *pci_dev = cd->pci_dev;
	u64 queue_status;
	__be32 icrc_hsi_shi = 0x0000;
	__be32 old, new;

	/* unsigned long flags; */
	if (GENWQE_DDCB_SOFTWARE_TIMEOUT <= 0) {
		dev_err(&pci_dev->dev,
			"[%s] err: software timeout is not set!\n", __func__);
		return -EFAULT;
	}

	pddcb = &queue->ddcb_vaddr[req->num];

	for (t = 0; t < GENWQE_DDCB_SOFTWARE_TIMEOUT * 10; t++) {

		spin_lock_irqsave(&queue->ddcb_lock, flags);

		/* Check if req was meanwhile finished */
		if (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED)
			goto go_home;

		/* try to set PURGE bit if FETCHED/COMPLETED are not set */
		old = pddcb->icrc_hsi_shi_32;	/* read SHI/HSI in BE32 */
		if ((old & DDCB_FETCHED_BE32) == 0x00000000) {

			new = (old | DDCB_PURGE_BE32);
			icrc_hsi_shi = cmpxchg(&pddcb->icrc_hsi_shi_32,
					       old, new);
			if (icrc_hsi_shi == old)
				goto finish_ddcb;
		}

		/* normal finish with HSI bit */
		barrier();
		icrc_hsi_shi = pddcb->icrc_hsi_shi_32;
		if (icrc_hsi_shi & DDCB_COMPLETED_BE32)
			goto finish_ddcb;

		spin_unlock_irqrestore(&queue->ddcb_lock, flags);

		/*
		 * Here the check_ddcb() function will most likely
		 * discover this DDCB to be finished at some point in
		 * time. It will mark the req finished and free it up
		 * in the list.
		 */

		copy_ddcb_results(req, req->num); /* for the failing case */
		msleep(100); /* sleep for 1/10 second and try again */
		continue;

finish_ddcb:
		copy_ddcb_results(req, req->num);
		ddcb_requ_set_state(req, GENWQE_REQU_FINISHED);
		queue->ddcbs_in_flight--;
		queue->ddcb_req[req->num] = NULL; /* delete from array */
		ddcb_mark_cleared(pddcb);

		/* Move active DDCB further; Nothing to do here anymore. */

		/*
		 * We need to ensure that there is at least one free
		 * DDCB in the queue. To do that, we must advance
		 * ddcb_act only if the COMPLETED bit is set for the
		 * DDCB we are working on. Otherwise we treat that
		 * DDCB as occupied, even if we PURGED it (the
		 * hardware has not set the COMPLETED bit yet).
		 */
		icrc_hsi_shi = pddcb->icrc_hsi_shi_32;
		if ((icrc_hsi_shi & DDCB_COMPLETED_BE32) &&
		    (queue->ddcb_act == req->num)) {
			queue->ddcb_act = ((queue->ddcb_act + 1) %
					   queue->ddcb_max);
		}
go_home:
		spin_unlock_irqrestore(&queue->ddcb_lock, flags);
		return 0;
	}

	/*
	 * If the card is dead and the queue is forced to stop, we
	 * might see this in the queue status register.
	 */
	queue_status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS);

	dev_dbg(&pci_dev->dev, "UN/FINISHED DDCB#%d\n", req->num);
	genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));

	dev_err(&pci_dev->dev,
		"[%s] err: DDCB#%d not purged and not completed after %d seconds QSTAT=%016llx!!\n",
		__func__, req->num, GENWQE_DDCB_SOFTWARE_TIMEOUT,
		queue_status);

	print_ddcb_info(cd, req->queue);

	return -EFAULT;
}

int genwqe_init_debug_data(struct genwqe_dev *cd, struct genwqe_debug_data *d)
{
	int len;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (d == NULL) {
		dev_err(&pci_dev->dev,
			"[%s] err: invalid memory for debug data!\n",
			__func__);
		return -EFAULT;
	}

	len = sizeof(d->driver_version);
	snprintf(d->driver_version, len, "%s", DRV_VERSION);
	d->slu_unitcfg = cd->slu_unitcfg;
	d->app_unitcfg = cd->app_unitcfg;
	return 0;
}

/**
 * __genwqe_enqueue_ddcb() - Enqueue a DDCB
 * @cd: pointer to genwqe device descriptor
 * @req: pointer to DDCB execution request
 * @f_flags: file mode: blocking, non-blocking
 *
 * Return: 0 if enqueuing succeeded
 *         -EIO if card is unusable/PCIe problems
 *         -EBUSY if enqueuing failed
 */
int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req,
			  unsigned int f_flags)
{
	struct ddcb *pddcb;
	unsigned long flags;
	struct ddcb_queue *queue;
	struct pci_dev *pci_dev = cd->pci_dev;
	u16 icrc;

 retry:
	if (cd->card_state != GENWQE_CARD_USED) {
		printk_ratelimited(KERN_ERR
			"%s %s: [%s] Card is unusable/PCIe problem Req#%d\n",
			GENWQE_DEVNAME, dev_name(&pci_dev->dev),
			__func__, req->num);
		return -EIO;
	}

	queue = req->queue = &cd->queue;

	/* FIXME: workaround to improve performance when no IRQ is
	 * available.
	 */
	if (GENWQE_POLLING_ENABLED)
		genwqe_check_ddcb_queue(cd, queue);

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * It must be ensured to process all DDCBs in successive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * order. Use a lock here in order to prevent nested DDCB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * enqueuing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) spin_lock_irqsave(&queue->ddcb_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) pddcb = get_next_ddcb(cd, queue, &req->num); /* get ptr and num */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (pddcb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) spin_unlock_irqrestore(&queue->ddcb_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
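		/*
		 * No free DDCB is available: fail immediately for
		 * non-blocking callers, otherwise sleep until one
		 * becomes free and retry the enqueue.
		 */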
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) if (f_flags & O_NONBLOCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) queue->return_on_busy++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) queue->wait_on_busy++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) rc = wait_event_interruptible(queue->busy_waitq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) queue_free_ddcbs(queue) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) dev_dbg(&pci_dev->dev, "[%s] waiting for free DDCB: rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) __func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) if (rc == -ERESTARTSYS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) return rc; /* interrupted by a signal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (queue->ddcb_req[req->num] != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) spin_unlock_irqrestore(&queue->ddcb_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) dev_err(&pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) "[%s] picked DDCB %d with req=%p still in use!!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) __func__, req->num, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) ddcb_requ_set_state(req, GENWQE_REQU_ENQUEUED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) queue->ddcb_req[req->num] = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) pddcb->cmdopts_16 = cpu_to_be16(req->cmd.cmdopts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) pddcb->cmd = req->cmd.cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) pddcb->acfunc = req->cmd.acfunc; /* functional unit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) /*
	 * We know that we can get retc 0x104 with a CRC error; do not
	 * stop the queue in those cases for this command. XDIR = 1
	 * does not work for old SLU versions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * Last bitstream with the old XDIR behavior had SLU_ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * 0x34199.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if ((cd->slu_unitcfg & 0xFFFF0ull) > 0x34199ull)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) pddcb->xdir = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) pddcb->xdir = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
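	/*
	 * PSP encodes the ASIV and ASV lengths in units of 8 bytes:
	 * the ASIV length in the upper half and the ASV length in
	 * the lower half.
	 */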
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) pddcb->psp = (((req->cmd.asiv_length / 8) << 4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) ((req->cmd.asv_length / 8)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) pddcb->disp_ts_64 = cpu_to_be64(req->cmd.disp_ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * If copying the whole DDCB_ASIV_LENGTH is impacting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * performance we need to change it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * req->cmd.asiv_length. But simulation benefits from some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * non-architectured bits behind the architectured content.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * How much data is copied depends on the availability of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * ATS field, which was introduced late. If the ATS field is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * supported ASIV is 8 bytes shorter than it used to be. Since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * the ATS field is copied too, the code should do exactly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * what it did before, but I wanted to make copying of the ATS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * field very explicit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (genwqe_get_slu_id(cd) <= 0x2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) memcpy(&pddcb->__asiv[0], /* destination */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) &req->cmd.__asiv[0], /* source */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) DDCB_ASIV_LENGTH); /* req->cmd.asiv_length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) pddcb->n.ats_64 = cpu_to_be64(req->cmd.ats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) memcpy(&pddcb->n.asiv[0], /* destination */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) &req->cmd.asiv[0], /* source */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) DDCB_ASIV_LENGTH_ATS); /* req->cmd.asiv_length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) pddcb->icrc_hsi_shi_32 = cpu_to_be32(0x00000000); /* for crc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * Calculate CRC_16 for corresponding range PSP(7:4). Include
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * empty 4 bytes prior to the data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) icrc = genwqe_crc16((const u8 *)pddcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) ICRC_LENGTH(req->cmd.asiv_length), 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) pddcb->icrc_hsi_shi_32 = cpu_to_be32((u32)icrc << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) /* enable DDCB completion irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (!GENWQE_POLLING_ENABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) pddcb->icrc_hsi_shi_32 |= DDCB_INTR_BE32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) dev_dbg(&pci_dev->dev, "INPUT DDCB#%d\n", req->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (ddcb_requ_collect_debug_data(req)) {
		/* Use the kernel copy of the debug data; copying back
		 * to the user buffer happens later.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) genwqe_init_debug_data(cd, &req->debug_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) memcpy(&req->debug_data.ddcb_before, pddcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) sizeof(req->debug_data.ddcb_before));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) enqueue_ddcb(cd, queue, pddcb, req->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) queue->ddcbs_in_flight++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (queue->ddcbs_in_flight > queue->ddcbs_max_in_flight)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) queue->ddcbs_max_in_flight = queue->ddcbs_in_flight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) ddcb_requ_set_state(req, GENWQE_REQU_TAPPED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) spin_unlock_irqrestore(&queue->ddcb_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) wake_up_interruptible(&cd->queue_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * __genwqe_execute_raw_ddcb() - Setup and execute DDCB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * @cd: pointer to genwqe device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * @cmd: user provided DDCB command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * @f_flags: file mode: blocking, non-blocking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) struct genwqe_ddcb_cmd *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) unsigned int f_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) struct pci_dev *pci_dev = cd->pci_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (cmd->asiv_length > DDCB_ASIV_LENGTH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) dev_err(&pci_dev->dev, "[%s] err: wrong asiv_length of %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) __func__, cmd->asiv_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (cmd->asv_length > DDCB_ASV_LENGTH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) dev_err(&pci_dev->dev, "[%s] err: wrong asv_length of %d\n",
			__func__, cmd->asv_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) rc = __genwqe_enqueue_ddcb(cd, req, f_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
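	/*
	 * Wait for the DDCB to complete. On errors or signal
	 * interruption the DDCB is purged below (err_exit).
	 */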
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) rc = __genwqe_wait_ddcb(cd, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (rc < 0) /* error or signal interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) goto err_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
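	/* Copy the collected debug data back to the user buffer. */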
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (ddcb_requ_collect_debug_data(req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if (copy_to_user((struct genwqe_debug_data __user *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) (unsigned long)cmd->ddata_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) &req->debug_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) sizeof(struct genwqe_debug_data)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
	/*
	 * Values higher than 0x102 indicate completion with faults,
	 * values lower than 0x102 indicate processing faults. Note
	 * that the DDCB might have been purged, e.g. by Ctrl+C.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (cmd->retc != DDCB_RETC_COMPLETE) {
		/* This can happen e.g. for flash reads and needs to be
		 * handled by the upper-layer code.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) rc = -EBADMSG; /* not processed/error retc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) err_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) __genwqe_purge_ddcb(cd, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (ddcb_requ_collect_debug_data(req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (copy_to_user((struct genwqe_debug_data __user *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) (unsigned long)cmd->ddata_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) &req->debug_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) sizeof(struct genwqe_debug_data)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * genwqe_next_ddcb_ready() - Figure out if the next DDCB is already finished
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * @cd: pointer to genwqe device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * We use this as condition for our wait-queue code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) static int genwqe_next_ddcb_ready(struct genwqe_dev *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) struct ddcb *pddcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) struct ddcb_queue *queue = &cd->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) spin_lock_irqsave(&queue->ddcb_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
	if (queue_empty(queue)) { /* empty queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) spin_unlock_irqrestore(&queue->ddcb_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
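	/*
	 * Look at the DDCB at the active position; if the hardware
	 * has set the completed bit, there is work to be processed.
	 */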
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) pddcb = &queue->ddcb_vaddr[queue->ddcb_act];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) { /* ddcb ready */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) spin_unlock_irqrestore(&queue->ddcb_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) spin_unlock_irqrestore(&queue->ddcb_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * genwqe_ddcbs_in_flight() - Check how many DDCBs are in flight
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * @cd: pointer to genwqe device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) *
 * Keep track of the number of DDCBs which are currently in the
 * queue. This is needed for statistics as well as for deciding
 * whether to wait or better poll in case no interrupts are available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) int genwqe_ddcbs_in_flight(struct genwqe_dev *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) int ddcbs_in_flight = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) struct ddcb_queue *queue = &cd->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) spin_lock_irqsave(&queue->ddcb_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) ddcbs_in_flight += queue->ddcbs_in_flight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) spin_unlock_irqrestore(&queue->ddcb_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) return ddcbs_in_flight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) int rc, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) struct ddcb *pddcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) u64 val64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) unsigned int queue_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) struct pci_dev *pci_dev = cd->pci_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (GENWQE_DDCB_MAX < 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) queue_size = roundup(GENWQE_DDCB_MAX * sizeof(struct ddcb), PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) queue->ddcbs_in_flight = 0; /* statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) queue->ddcbs_max_in_flight = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) queue->ddcbs_completed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) queue->return_on_busy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) queue->wait_on_busy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) queue->ddcb_seq = 0x100; /* start sequence number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) queue->ddcb_max = GENWQE_DDCB_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) queue->ddcb_vaddr = __genwqe_alloc_consistent(cd, queue_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) &queue->ddcb_daddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (queue->ddcb_vaddr == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) dev_err(&pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) "[%s] **err: could not allocate DDCB **\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) queue->ddcb_req = kcalloc(queue->ddcb_max, sizeof(struct ddcb_requ *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (!queue->ddcb_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) goto free_ddcbs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) queue->ddcb_waitqs = kcalloc(queue->ddcb_max,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) sizeof(wait_queue_head_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) if (!queue->ddcb_waitqs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) goto free_requs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
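	/*
	 * Pre-mark each DDCB as completed, so that the queue logic
	 * does not mistake freshly allocated entries for outstanding
	 * work.
	 */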
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) for (i = 0; i < queue->ddcb_max; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) pddcb = &queue->ddcb_vaddr[i]; /* DDCBs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) pddcb->icrc_hsi_shi_32 = DDCB_COMPLETED_BE32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) pddcb->retc_16 = cpu_to_be16(0xfff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) queue->ddcb_req[i] = NULL; /* requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) init_waitqueue_head(&queue->ddcb_waitqs[i]); /* waitqueues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) queue->ddcb_act = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) queue->ddcb_next = 0; /* queue is empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) spin_lock_init(&queue->ddcb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) init_waitqueue_head(&queue->busy_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
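	/*
	 * Program the queue hardware: CRC checking mode, DDCB segment
	 * base address, initial sequence number and the wrap (last
	 * DDCB) pointer.
	 */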
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) val64 = ((u64)(queue->ddcb_max - 1) << 8); /* lastptr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) __genwqe_writeq(cd, queue->IO_QUEUE_CONFIG, 0x07); /* iCRC/vCRC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) __genwqe_writeq(cd, queue->IO_QUEUE_SEGMENT, queue->ddcb_daddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) __genwqe_writeq(cd, queue->IO_QUEUE_INITSQN, queue->ddcb_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) __genwqe_writeq(cd, queue->IO_QUEUE_WRAP, val64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) free_requs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) kfree(queue->ddcb_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) queue->ddcb_req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) free_ddcbs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) queue->ddcb_daddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) queue->ddcb_vaddr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) queue->ddcb_daddr = 0ull;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) static int ddcb_queue_initialized(struct ddcb_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) return queue->ddcb_vaddr != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) static void free_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) unsigned int queue_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) queue_size = roundup(queue->ddcb_max * sizeof(struct ddcb), PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) kfree(queue->ddcb_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) queue->ddcb_req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) if (queue->ddcb_vaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) queue->ddcb_daddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) queue->ddcb_vaddr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) queue->ddcb_daddr = 0ull;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) static irqreturn_t genwqe_pf_isr(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) u64 gfir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) struct genwqe_dev *cd = (struct genwqe_dev *)dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) struct pci_dev *pci_dev = cd->pci_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * In case of fatal FIR error the queue is stopped, such that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) * we can safely check it without risking anything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) cd->irqs_processed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) wake_up_interruptible(&cd->queue_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * Checking for errors before kicking the queue might be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) * safer, but slower for the good-case ... See above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) if (((gfir & GFIR_ERR_TRIGGER) != 0x0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) !pci_channel_offline(pci_dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (cd->use_platform_recovery) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * Since we use raw accessors, EEH errors won't be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * detected by the platform until we do a non-raw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * MMIO or config space read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) readq(cd->mmio + IO_SLC_CFGREG_GFIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) /* Don't do anything if the PCI channel is frozen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (pci_channel_offline(pci_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) wake_up_interruptible(&cd->health_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /*
		 * By default GFIRs cause recovery actions. This
		 * output is just for debugging when recovery is masked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) dev_err_ratelimited(&pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) "[%s] GFIR=%016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) __func__, gfir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) static irqreturn_t genwqe_vf_isr(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) struct genwqe_dev *cd = (struct genwqe_dev *)dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) cd->irqs_processed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) wake_up_interruptible(&cd->queue_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * genwqe_card_thread() - Work thread for the DDCB queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * @data: pointer to genwqe device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) *
 * The idea is to check if there are DDCBs being processed. If some
 * DDCBs have finished, we process them and wake up the
 * requestors. Otherwise we give other processes time using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) * cond_resched().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) static int genwqe_card_thread(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) int should_stop = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) struct genwqe_dev *cd = (struct genwqe_dev *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) while (!kthread_should_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) genwqe_check_ddcb_queue(cd, &cd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
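		/*
		 * In polling mode wake up after one jiffy as long as
		 * DDCBs are in flight; with interrupts enabled sleep
		 * until the next DDCB is ready or for up to a second.
		 */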
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (GENWQE_POLLING_ENABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) wait_event_interruptible_timeout(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) cd->queue_waitq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) genwqe_ddcbs_in_flight(cd) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) (should_stop = kthread_should_stop()), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) wait_event_interruptible_timeout(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) cd->queue_waitq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) genwqe_next_ddcb_ready(cd) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) (should_stop = kthread_should_stop()), HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (should_stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * Avoid soft lockups on heavy loads; we do not want
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * to disable our interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) * genwqe_setup_service_layer() - Setup DDCB queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) * @cd: pointer to genwqe device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * Allocate DDCBs. Configure Service Layer Controller (SLC).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) *
 * Return: 0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) int genwqe_setup_service_layer(struct genwqe_dev *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) struct ddcb_queue *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) struct pci_dev *pci_dev = cd->pci_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (genwqe_is_privileged(cd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) rc = genwqe_card_reset(cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) dev_err(&pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) "[%s] err: reset failed.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) genwqe_read_softreset(cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) queue = &cd->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) queue->IO_QUEUE_CONFIG = IO_SLC_QUEUE_CONFIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) queue->IO_QUEUE_STATUS = IO_SLC_QUEUE_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) queue->IO_QUEUE_SEGMENT = IO_SLC_QUEUE_SEGMENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) queue->IO_QUEUE_INITSQN = IO_SLC_QUEUE_INITSQN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) queue->IO_QUEUE_OFFSET = IO_SLC_QUEUE_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) queue->IO_QUEUE_WRAP = IO_SLC_QUEUE_WRAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) queue->IO_QUEUE_WTIME = IO_SLC_QUEUE_WTIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) queue->IO_QUEUE_ERRCNTS = IO_SLC_QUEUE_ERRCNTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) queue->IO_QUEUE_LRW = IO_SLC_QUEUE_LRW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) rc = setup_ddcb_queue(cd, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) init_waitqueue_head(&cd->queue_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) cd->card_thread = kthread_run(genwqe_card_thread, cd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) GENWQE_DEVNAME "%d_thread",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) cd->card_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (IS_ERR(cd->card_thread)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) rc = PTR_ERR(cd->card_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) cd->card_thread = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) goto stop_free_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) rc = genwqe_set_interrupt_capability(cd, GENWQE_MSI_IRQS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) goto stop_kthread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) * We must have all wait-queues initialized when we enable the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) * interrupts. Otherwise we might crash if we get an early
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) * irq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) init_waitqueue_head(&cd->health_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) if (genwqe_is_privileged(cd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) rc = request_irq(pci_dev->irq, genwqe_pf_isr, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) GENWQE_DEVNAME, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) rc = request_irq(pci_dev->irq, genwqe_vf_isr, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) GENWQE_DEVNAME, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) dev_err(&pci_dev->dev, "irq %d not free.\n", pci_dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) goto stop_irq_cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) cd->card_state = GENWQE_CARD_USED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) stop_irq_cap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) genwqe_reset_interrupt_capability(cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) stop_kthread:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) kthread_stop(cd->card_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) cd->card_thread = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) stop_free_queue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) free_ddcb_queue(cd, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * queue_wake_up_all() - Handles fatal error case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) * @cd: pointer to genwqe device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) *
 * The PCI device became unusable and we have to stop all pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) * requests as fast as we can. The code after this must purge the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) * DDCBs in question and ensure that all mappings are freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) static int queue_wake_up_all(struct genwqe_dev *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) struct ddcb_queue *queue = &cd->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) spin_lock_irqsave(&queue->ddcb_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) for (i = 0; i < queue->ddcb_max; i++)
		wake_up_interruptible(&queue->ddcb_waitqs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) wake_up_interruptible(&queue->busy_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) spin_unlock_irqrestore(&queue->ddcb_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * genwqe_finish_queue() - Remove any genwqe devices and user-interfaces
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) * @cd: pointer to genwqe device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) * Relies on the pre-condition that there are no users of the card
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) * device anymore e.g. with open file-descriptors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) * This function must be robust enough to be called twice.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) int genwqe_finish_queue(struct genwqe_dev *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) int i, rc = 0, in_flight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) int waitmax = GENWQE_DDCB_SOFTWARE_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) struct pci_dev *pci_dev = cd->pci_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) struct ddcb_queue *queue = &cd->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) if (!ddcb_queue_initialized(queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) /* Do not wipe out the error state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) if (cd->card_state == GENWQE_CARD_USED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) cd->card_state = GENWQE_CARD_UNUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
	/* Wake up all requests in the DDCB queue so that they can be
	 * removed cleanly.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) queue_wake_up_all(cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) /* We must wait to get rid of the DDCBs in flight */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) for (i = 0; i < waitmax; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) in_flight = genwqe_ddcbs_in_flight(cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if (in_flight == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) dev_dbg(&pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) " DEBUG [%d/%d] waiting for queue to get empty: %d requests!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) i, waitmax, in_flight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) /*
		 * Severe error situation: the card itself has
		 * 16 DDCB queues, each queue has e.g. 32 entries,
		 * each DDCB has a hardware timeout of currently 250
		 * msec, but the PFs have a hardware timeout of 8 sec
		 * ... so take something large.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) msleep(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) if (i == waitmax) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) dev_err(&pci_dev->dev, " [%s] err: queue is not empty!!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) * genwqe_release_service_layer() - Shutdown DDCB queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) * @cd: genwqe device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) * This function must be robust enough to be called twice.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) int genwqe_release_service_layer(struct genwqe_dev *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) struct pci_dev *pci_dev = cd->pci_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) if (!ddcb_queue_initialized(&cd->queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) free_irq(pci_dev->irq, cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) genwqe_reset_interrupt_capability(cd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) if (cd->card_thread != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) kthread_stop(cd->card_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) cd->card_thread = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) free_ddcb_queue(cd, &cd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }