// SPDX-License-Identifier: GPL-2.0-only
/*
 * IBM Accelerator Family 'GenWQE'
 *
 * (C) Copyright IBM Corp. 2013
 *
 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
 * Author: Michael Jung <mijung@gmx.net>
 * Author: Michael Ruettger <michael@ibmra.de>
 */

/*
 * Miscellaneous functionality used in the other GenWQE driver parts.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/page-flags.h>
#include <linux/scatterlist.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/pgtable.h>

#include "genwqe_driver.h"
#include "card_base.h"
#include "card_ddcb.h"

/**
 * __genwqe_writeq() - Write 64-bit register
 * @cd: genwqe device descriptor
 * @byte_offs: byte offset within BAR
 * @val: 64-bit value
 *
 * Return: 0 if success; < 0 if error
 */
int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return -EIO;

	if (cd->mmio == NULL)
		return -EIO;

	if (pci_channel_offline(pci_dev))
		return -EIO;

	__raw_writeq((__force u64)cpu_to_be64(val), cd->mmio + byte_offs);
	return 0;
}

/**
 * __genwqe_readq() - Read 64-bit register
 * @cd: genwqe device descriptor
 * @byte_offs: offset within BAR
 *
 * Return: value from register
 */
u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs)
{
	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return 0xffffffffffffffffull;

	if ((cd->err_inject & GENWQE_INJECT_GFIR_FATAL) &&
	    (byte_offs == IO_SLC_CFGREG_GFIR))
		return 0x000000000000ffffull;

	if ((cd->err_inject & GENWQE_INJECT_GFIR_INFO) &&
	    (byte_offs == IO_SLC_CFGREG_GFIR))
		return 0x00000000ffff0000ull;

	if (cd->mmio == NULL)
		return 0xffffffffffffffffull;

	return be64_to_cpu((__force __be64)__raw_readq(cd->mmio + byte_offs));
}

/**
 * __genwqe_writel() - Write 32-bit register
 * @cd: genwqe device descriptor
 * @byte_offs: byte offset within BAR
 * @val: 32-bit value
 *
 * Return: 0 if success; < 0 if error
 */
int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return -EIO;

	if (cd->mmio == NULL)
		return -EIO;

	if (pci_channel_offline(pci_dev))
		return -EIO;

	__raw_writel((__force u32)cpu_to_be32(val), cd->mmio + byte_offs);
	return 0;
}

/**
 * __genwqe_readl() - Read 32-bit register
 * @cd: genwqe device descriptor
 * @byte_offs: offset within BAR
 *
 * Return: Value from register
 */
u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs)
{
	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return 0xffffffff;

	if (cd->mmio == NULL)
		return 0xffffffff;

	return be32_to_cpu((__force __be32)__raw_readl(cd->mmio + byte_offs));
}

/**
 * genwqe_read_app_id() - Extract app_id
 * @cd: genwqe device descriptor
 * @app_name: buffer used to pass back the name
 * @len: length of the buffer for the name
 *
 * app_unitcfg needs to be filled with valid data first.
 */
int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len)
{
	int i, j;
	u32 app_id = (u32)cd->app_unitcfg;

	memset(app_name, 0, len);
	for (i = 0, j = 0; j < min(len, 4); j++) {
		char ch = (char)((app_id >> (24 - j*8)) & 0xff);

		if (ch == ' ')
			continue;
		app_name[i++] = isprint(ch) ? ch : 'X';
	}
	return i;
}
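
/*
 * Illustrative sketch (not part of the driver flow): the app_id is the
 * low 32 bits of APP_UNITCFG, interpreted as four ASCII characters, most
 * significant byte first. The register value below is a made-up example,
 * not a real unit configuration.
 *
 *	char name[5];
 *	int n;
 *
 *	// suppose the card reported app_unitcfg = 0x475a4950 ("GZIP")
 *	n = genwqe_read_app_id(cd, name, sizeof(name));
 *	// name now holds "GZIP" and n is 4; blanks are skipped and
 *	// non-printable bytes are replaced by 'X'
 */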

/**
 * genwqe_init_crc32() - Prepare a lookup table for fast crc32 calculations
 *
 * Existing kernel functions seem to use a different polynomial,
 * therefore we could not use them here.
 *
 * Genwqe's Polynomial = 0x20044009
 */
#define CRC32_POLYNOMIAL	0x20044009
static u32 crc32_tab[256];	/* crc32 lookup table */

void genwqe_init_crc32(void)
{
	int i, j;
	u32 crc;

	for (i = 0; i < 256; i++) {
		crc = i << 24;
		for (j = 0; j < 8; j++) {
			if (crc & 0x80000000)
				crc = (crc << 1) ^ CRC32_POLYNOMIAL;
			else
				crc = (crc << 1);
		}
		crc32_tab[i] = crc;
	}
}

/**
 * genwqe_crc32() - Generate 32-bit crc as required for DDCBs
 * @buff: pointer to data buffer
 * @len: length of data for calculation
 * @init: initial crc (0xffffffff at start)
 *
 * polynomial = x^32 + x^29 + x^18 + x^14 + x^3 + 1 (0x20044009)
 *
 * Example: 4 bytes 0x01 0x02 0x03 0x04 with init=0xffffffff should
 * result in a crc32 of 0xf33cb7d3.
 *
 * The existing kernel crc functions did not cover this polynomial yet.
 *
 * Return: crc32 checksum.
 */
u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
{
	int i;
	u32 crc;

	crc = init;
	while (len--) {
		i = ((crc >> 24) ^ *buff++) & 0xFF;
		crc = (crc << 8) ^ crc32_tab[i];
	}
	return crc;
}
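
/*
 * Minimal usage sketch, mirroring the example in the comment above
 * (illustration only, no driver code calls it this way verbatim):
 *
 *	u8 buf[] = { 0x01, 0x02, 0x03, 0x04 };
 *	u32 crc;
 *
 *	genwqe_init_crc32();
 *	crc = genwqe_crc32(buf, sizeof(buf), 0xffffffff);
 *	// crc is expected to be 0xf33cb7d3 for this input
 */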

void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
				dma_addr_t *dma_handle)
{
	if (get_order(size) >= MAX_ORDER)
		return NULL;

	return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
				  GFP_KERNEL);
}

void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
	if (vaddr == NULL)
		return;

	dma_free_coherent(&cd->pci_dev->dev, size, vaddr, dma_handle);
}

static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list,
			       int num_pages)
{
	int i;
	struct pci_dev *pci_dev = cd->pci_dev;

	for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) {
		pci_unmap_page(pci_dev, dma_list[i],
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		dma_list[i] = 0x0;
	}
}

static int genwqe_map_pages(struct genwqe_dev *cd,
			    struct page **page_list, int num_pages,
			    dma_addr_t *dma_list)
{
	int i;
	struct pci_dev *pci_dev = cd->pci_dev;

	/* establish DMA mapping for requested pages */
	for (i = 0; i < num_pages; i++) {
		dma_addr_t daddr;

		dma_list[i] = 0x0;
		daddr = pci_map_page(pci_dev, page_list[i],
				     0,	/* map_offs */
				     PAGE_SIZE,
				     PCI_DMA_BIDIRECTIONAL); /* FIXME rd/rw */

		if (pci_dma_mapping_error(pci_dev, daddr)) {
			dev_err(&pci_dev->dev,
				"[%s] err: no dma addr daddr=%016llx!\n",
				__func__, (long long)daddr);
			goto err;
		}

		dma_list[i] = daddr;
	}
	return 0;

 err:
	genwqe_unmap_pages(cd, dma_list, num_pages);
	return -EIO;
}


static int genwqe_sgl_size(int num_pages)
{
	int len, num_tlb = num_pages / 7;

	len = sizeof(struct sg_entry) * (num_pages + num_tlb + 1);
	return roundup(len, PAGE_SIZE);
}
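
/*
 * Worked example (assuming a 16-byte struct sg_entry and 4 KiB pages,
 * which matches the 128-byte blocks of eight entries that
 * genwqe_setup_sgl() builds below): each block carries 7 data entries
 * plus 1 chaining entry, hence the num_pages / 7 extra entries, plus 1
 * for the terminating entry. For num_pages = 100: num_tlb = 14,
 * len = (100 + 14 + 1) * 16 = 1840 bytes, which rounds up to one
 * 4096-byte page.
 */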

/*
 * genwqe_alloc_sync_sgl() - Allocate memory for sgl and overlapping pages
 *
 * Allocates memory for sgl and overlapping pages. Pages which might
 * overlap other user-space memory blocks are cached for DMA, such that
 * we do not run into synchronization issues. Data is copied from
 * user-space into the cached pages.
 */
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
			  void __user *user_addr, size_t user_size, int write)
{
	int ret = -ENOMEM;
	struct pci_dev *pci_dev = cd->pci_dev;

	sgl->fpage_offs = offset_in_page((unsigned long)user_addr);
	sgl->fpage_size = min_t(size_t, PAGE_SIZE-sgl->fpage_offs, user_size);
	sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE);
	sgl->lpage_size = (user_size - sgl->fpage_size) % PAGE_SIZE;

	dev_dbg(&pci_dev->dev, "[%s] uaddr=%p usize=%8ld nr_pages=%ld fpage_offs=%lx fpage_size=%ld lpage_size=%ld\n",
		__func__, user_addr, user_size, sgl->nr_pages,
		sgl->fpage_offs, sgl->fpage_size, sgl->lpage_size);

	sgl->user_addr = user_addr;
	sgl->user_size = user_size;
	sgl->write = write;
	sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);

	if (get_order(sgl->sgl_size) > MAX_ORDER) {
		dev_err(&pci_dev->dev,
			"[%s] err: too much memory requested!\n", __func__);
		return ret;
	}

	sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size,
					     &sgl->sgl_dma_addr);
	if (sgl->sgl == NULL) {
		dev_err(&pci_dev->dev,
			"[%s] err: no memory available!\n", __func__);
		return ret;
	}

	/* Only use buffering on incomplete pages */
	if ((sgl->fpage_size != 0) && (sgl->fpage_size != PAGE_SIZE)) {
		sgl->fpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
						       &sgl->fpage_dma_addr);
		if (sgl->fpage == NULL)
			goto err_out;

		/* Sync with user memory */
		if (copy_from_user(sgl->fpage + sgl->fpage_offs,
				   user_addr, sgl->fpage_size)) {
			ret = -EFAULT;
			goto err_out;
		}
	}
	if (sgl->lpage_size != 0) {
		sgl->lpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
						       &sgl->lpage_dma_addr);
		if (sgl->lpage == NULL)
			goto err_out1;

		/* Sync with user memory */
		if (copy_from_user(sgl->lpage, user_addr + user_size -
				   sgl->lpage_size, sgl->lpage_size)) {
			ret = -EFAULT;
			goto err_out2;
		}
	}
	return 0;

 err_out2:
	__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
				 sgl->lpage_dma_addr);
	sgl->lpage = NULL;
	sgl->lpage_dma_addr = 0;
 err_out1:
	__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
				 sgl->fpage_dma_addr);
	sgl->fpage = NULL;
	sgl->fpage_dma_addr = 0;
 err_out:
	__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
				 sgl->sgl_dma_addr);
	sgl->sgl = NULL;
	sgl->sgl_dma_addr = 0;
	sgl->sgl_size = 0;

	return ret;
}
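
/*
 * Typical life cycle of a sync sgl, sketched for illustration only
 * (error handling omitted; the dma_list is assumed to come from a
 * prior genwqe_user_vmap() of the same user buffer, and the sgl is
 * assumed zero-initialised as the driver's kzalloc()'d requests are):
 *
 *	struct genwqe_sgl sgl = {};
 *
 *	genwqe_alloc_sync_sgl(cd, &sgl, user_addr, user_size, 1);
 *	genwqe_setup_sgl(cd, &sgl, m->dma_list);
 *	// ... hand sgl.sgl_dma_addr to the DDCB and wait for completion ...
 *	genwqe_free_sync_sgl(cd, &sgl);
 */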

int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
		     dma_addr_t *dma_list)
{
	int i = 0, j = 0, p;
	unsigned long dma_offs, map_offs;
	dma_addr_t prev_daddr = 0;
	struct sg_entry *s, *last_s = NULL;
	size_t size = sgl->user_size;

	dma_offs = 128;		/* next block if needed/dma_offset */
	map_offs = sgl->fpage_offs; /* offset in first page */

	s = &sgl->sgl[0];	/* first set of 8 entries */
	p = 0;			/* page */
	while (p < sgl->nr_pages) {
		dma_addr_t daddr;
		unsigned int size_to_map;

		/* always write the chaining entry, cleanup is done later */
		j = 0;
		s[j].target_addr = cpu_to_be64(sgl->sgl_dma_addr + dma_offs);
		s[j].len = cpu_to_be32(128);
		s[j].flags = cpu_to_be32(SG_CHAINED);
		j++;

		while (j < 8) {
			/* DMA mapping for requested page, offs, size */
			size_to_map = min(size, PAGE_SIZE - map_offs);

			if ((p == 0) && (sgl->fpage != NULL)) {
				daddr = sgl->fpage_dma_addr + map_offs;

			} else if ((p == sgl->nr_pages - 1) &&
				   (sgl->lpage != NULL)) {
				daddr = sgl->lpage_dma_addr;
			} else {
				daddr = dma_list[p] + map_offs;
			}

			size -= size_to_map;
			map_offs = 0;

			if (prev_daddr == daddr) {
				u32 prev_len = be32_to_cpu(last_s->len);

				/* pr_info("daddr combining: "
					"%016llx/%08x -> %016llx\n",
					prev_daddr, prev_len, daddr); */

				last_s->len = cpu_to_be32(prev_len +
							  size_to_map);

				p++; /* process next page */
				if (p == sgl->nr_pages)
					goto fixup;  /* nothing to do */

				prev_daddr = daddr + size_to_map;
				continue;
			}

			/* start new entry */
			s[j].target_addr = cpu_to_be64(daddr);
			s[j].len = cpu_to_be32(size_to_map);
			s[j].flags = cpu_to_be32(SG_DATA);
			prev_daddr = daddr + size_to_map;
			last_s = &s[j];
			j++;

			p++; /* process next page */
			if (p == sgl->nr_pages)
				goto fixup;  /* nothing to do */
		}
		dma_offs += 128;
		s += 8;		/* continue 8 elements further */
	}
 fixup:
	if (j == 1) {		/* combining happened on last entry! */
		s -= 8;		/* full shift needed on previous sgl block */
		j = 7;		/* shift all elements */
	}

	for (i = 0; i < j; i++)	/* move elements 1 up */
		s[i] = s[i + 1];

	s[i].target_addr = cpu_to_be64(0);
	s[i].len = cpu_to_be32(0);
	s[i].flags = cpu_to_be32(SG_END_LIST);
	return 0;
}
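
/*
 * For reference, the list layout the code above builds (assuming
 * 16-byte sg entries): the sgl buffer is a chain of 128-byte blocks of
 * eight entries each. Entry 0 of every block is an SG_CHAINED entry
 * whose target_addr points 128 bytes further into the same buffer;
 * entries 1..7 carry SG_DATA. Pages mapping to contiguous DMA addresses
 * are merged into one entry by growing last_s->len. The fixup at the
 * end drops the chaining entry of the final block by shifting its
 * entries up one slot (or shifts the previous full block when the final
 * block only held a chaining entry) and writes an SG_END_LIST
 * terminator.
 */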

/**
 * genwqe_free_sync_sgl() - Free memory for sgl and overlapping pages
 * @cd: genwqe device descriptor
 * @sgl: scatter gather list describing user-space memory
 *
 * After the DMA transfer has been completed we free the memory for
 * the sgl and the cached pages. Data is being transferred from cached
 * pages into user-space buffers.
 */
int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
{
	int rc = 0;
	size_t offset;
	unsigned long res;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (sgl->fpage) {
		if (sgl->write) {
			res = copy_to_user(sgl->user_addr,
				sgl->fpage + sgl->fpage_offs, sgl->fpage_size);
			if (res) {
				dev_err(&pci_dev->dev,
					"[%s] err: copying fpage! (res=%lu)\n",
					__func__, res);
				rc = -EFAULT;
			}
		}
		__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
					 sgl->fpage_dma_addr);
		sgl->fpage = NULL;
		sgl->fpage_dma_addr = 0;
	}
	if (sgl->lpage) {
		if (sgl->write) {
			offset = sgl->user_size - sgl->lpage_size;
			res = copy_to_user(sgl->user_addr + offset, sgl->lpage,
					   sgl->lpage_size);
			if (res) {
				dev_err(&pci_dev->dev,
					"[%s] err: copying lpage! (res=%lu)\n",
					__func__, res);
				rc = -EFAULT;
			}
		}
		__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
					 sgl->lpage_dma_addr);
		sgl->lpage = NULL;
		sgl->lpage_dma_addr = 0;
	}
	__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
				 sgl->sgl_dma_addr);

	sgl->sgl = NULL;
	sgl->sgl_dma_addr = 0x0;
	sgl->sgl_size = 0;
	return rc;
}

/**
 * genwqe_user_vmap() - Map user-space memory to virtual kernel memory
 * @cd: pointer to genwqe device
 * @m: mapping params
 * @uaddr: user virtual address
 * @size: size of memory to be mapped
 *
 * We need to think about how we could speed this up. Of course it is
 * not a good idea to do this over and over again, like we are
 * currently doing it. Nevertheless, I am curious where on the path
 * the performance is spent. Most probably within the memory
 * allocation functions, but maybe also in the DMA mapping code.
 *
 * Restrictions: The maximum size of the possible mapping currently depends
 *               on the amount of memory we can get using kzalloc() for the
 *               page_list and dma_alloc_coherent for the sg_list.
 *               The sg_list is currently itself not scattered, which could
 *               be fixed with some effort. The page_list must be split into
 *               PAGE_SIZE chunks too. All that would make the already
 *               complicated code even more complicated.
 *
 * Return: 0 if success
 */
int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
		     unsigned long size)
{
	int rc = -EINVAL;
	unsigned long data, offs;
	struct pci_dev *pci_dev = cd->pci_dev;

	if ((uaddr == NULL) || (size == 0)) {
		m->size = 0;	/* mark unused and not added */
		return -EINVAL;
	}
	m->u_vaddr = uaddr;
	m->size = size;

	/* determine space needed for page_list. */
	data = (unsigned long)uaddr;
	offs = offset_in_page(data);
	if (size > ULONG_MAX - PAGE_SIZE - offs) {
		m->size = 0;	/* mark unused and not added */
		return -EINVAL;
	}
	m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE);

	m->page_list = kcalloc(m->nr_pages,
			       sizeof(struct page *) + sizeof(dma_addr_t),
			       GFP_KERNEL);
	if (!m->page_list) {
		dev_err(&pci_dev->dev, "err: alloc page_list failed\n");
		m->nr_pages = 0;
		m->u_vaddr = NULL;
		m->size = 0;	/* mark unused and not added */
		return -ENOMEM;
	}
	m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages);

	/* pin user pages in memory */
	rc = pin_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
				 m->nr_pages,
				 m->write ? FOLL_WRITE : 0, /* readable/writable */
				 m->page_list);	/* ptrs to pages */
	if (rc < 0)
		goto fail_pin_user_pages;

	/* assumption: pin_user_pages can be killed by signals. */
	if (rc < m->nr_pages) {
		unpin_user_pages_dirty_lock(m->page_list, rc, m->write);
		rc = -EFAULT;
		goto fail_pin_user_pages;
	}

	rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list);
	if (rc != 0)
		goto fail_free_user_pages;

	return 0;

 fail_free_user_pages:
	unpin_user_pages_dirty_lock(m->page_list, m->nr_pages, m->write);

 fail_pin_user_pages:
	kfree(m->page_list);
	m->page_list = NULL;
	m->dma_list = NULL;
	m->nr_pages = 0;
	m->u_vaddr = NULL;
	m->size = 0;	/* mark unused and not added */
	return rc;
}
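
/*
 * Pairing sketch (illustration only; the real callers live in the DDCB
 * code): a struct dma_mapping is assumed to be zero-initialised, with
 * ->write set according to the transfer direction before pinning.
 *
 *	struct dma_mapping m = { .write = 1 };
 *
 *	genwqe_user_vmap(cd, &m, uaddr, size);
 *	// ... m.dma_list[] now holds one DMA address per pinned page ...
 *	genwqe_user_vunmap(cd, &m);
 */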

/**
 * genwqe_user_vunmap() - Undo mapping of user-space mem to virtual kernel
 *                        memory
 * @cd: pointer to genwqe device
 * @m: mapping params
 */
int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!dma_mapping_used(m)) {
		dev_err(&pci_dev->dev, "[%s] err: mapping %p not used!\n",
			__func__, m);
		return -EINVAL;
	}

	if (m->dma_list)
		genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);

	if (m->page_list) {
		unpin_user_pages_dirty_lock(m->page_list, m->nr_pages,
					    m->write);
		kfree(m->page_list);
		m->page_list = NULL;
		m->dma_list = NULL;
		m->nr_pages = 0;
	}

	m->u_vaddr = NULL;
	m->size = 0;	/* mark as unused and not added */
	return 0;
}

/**
 * genwqe_card_type() - Get chip type SLU Configuration Register
 * @cd: pointer to the genwqe device descriptor
 * Return: 0: Altera Stratix-IV 230
 *         1: Altera Stratix-IV 530
 *         2: Altera Stratix-V A4
 *         3: Altera Stratix-V A7
 */
u8 genwqe_card_type(struct genwqe_dev *cd)
{
	u64 card_type = cd->slu_unitcfg;

	return (u8)((card_type & IO_SLU_UNITCFG_TYPE_MASK) >> 20);
}

/**
 * genwqe_card_reset() - Reset the card
 * @cd: pointer to the genwqe device descriptor
 */
int genwqe_card_reset(struct genwqe_dev *cd)
{
	u64 softrst;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!genwqe_is_privileged(cd))
		return -ENODEV;

	/* new SL */
	__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, 0x1ull);
	msleep(1000);
	__genwqe_readq(cd, IO_HSU_FIR_CLR);
	__genwqe_readq(cd, IO_APP_FIR_CLR);
	__genwqe_readq(cd, IO_SLU_FIR_CLR);

	/*
	 * Read-modify-write to preserve the stealth bits
	 *
	 * For SL >= 039, the Stealth WE bit allows removing
	 * the read-modify-write.
	 * r-m-w may require a mask 0x3C to avoid hitting hard
	 * reset again for error reset (should be 0, chicken).
	 */
	softrst = __genwqe_readq(cd, IO_SLC_CFGREG_SOFTRESET) & 0x3cull;
	__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, softrst | 0x2ull);

	/* give ERRORRESET some time to finish */
	msleep(50);

	if (genwqe_need_err_masking(cd)) {
		dev_info(&pci_dev->dev,
			 "[%s] masking errors for old bitstreams\n", __func__);
		__genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
	}
	return 0;
}

int genwqe_read_softreset(struct genwqe_dev *cd)
{
	u64 bitstream;

	if (!genwqe_is_privileged(cd))
		return -ENODEV;

	bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1;
	cd->softreset = (bitstream == 0) ? 0x8ull : 0xcull;
	return 0;
}

/**
 * genwqe_set_interrupt_capability() - Configure MSI capability structure
 * @cd: pointer to the device
 * @count: number of vectors to allocate
 * Return: 0 if no error
 */
int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
{
	int rc;

	rc = pci_alloc_irq_vectors(cd->pci_dev, 1, count, PCI_IRQ_MSI);
	if (rc < 0)
		return rc;
	return 0;
}

/**
 * genwqe_reset_interrupt_capability() - Undo genwqe_set_interrupt_capability()
 * @cd: pointer to the device
 */
void genwqe_reset_interrupt_capability(struct genwqe_dev *cd)
{
	pci_free_irq_vectors(cd->pci_dev);
}

/**
 * set_reg_idx() - Fill array with data. Ignore illegal offsets.
 * @cd: card device
 * @r: debug register array
 * @i: index to desired entry
 * @m: maximum possible entries
 * @addr: address which is read
 * @idx: index in debug array
 * @val: read value
 */
static int set_reg_idx(struct genwqe_dev *cd, struct genwqe_reg *r,
		       unsigned int *i, unsigned int m, u32 addr, u32 idx,
		       u64 val)
{
	if (WARN_ON_ONCE(*i >= m))
		return -EFAULT;

	r[*i].addr = addr;
	r[*i].idx = idx;
	r[*i].val = val;
	++*i;
	return 0;
}

static int set_reg(struct genwqe_dev *cd, struct genwqe_reg *r,
		   unsigned int *i, unsigned int m, u32 addr, u64 val)
{
	return set_reg_idx(cd, r, i, m, addr, 0, val);
}

int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
			  unsigned int max_regs, int all)
{
	unsigned int i, j, idx = 0;
	u32 ufir_addr, ufec_addr, sfir_addr, sfec_addr;
	u64 gfir, sluid, appid, ufir, ufec, sfir, sfec;

	/* Global FIR */
	gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
	set_reg(cd, regs, &idx, max_regs, IO_SLC_CFGREG_GFIR, gfir);

	/* UnitCfg for SLU */
	sluid = __genwqe_readq(cd, IO_SLU_UNITCFG); /* 0x00000000 */
	set_reg(cd, regs, &idx, max_regs, IO_SLU_UNITCFG, sluid);

	/* UnitCfg for APP */
	appid = __genwqe_readq(cd, IO_APP_UNITCFG); /* 0x02000000 */
	set_reg(cd, regs, &idx, max_regs, IO_APP_UNITCFG, appid);

	/* Check all chip Units */
	for (i = 0; i < GENWQE_MAX_UNITS; i++) {

		/* Unit FIR */
		ufir_addr = (i << 24) | 0x008;
		ufir = __genwqe_readq(cd, ufir_addr);
		set_reg(cd, regs, &idx, max_regs, ufir_addr, ufir);

		/* Unit FEC */
		ufec_addr = (i << 24) | 0x018;
		ufec = __genwqe_readq(cd, ufec_addr);
		set_reg(cd, regs, &idx, max_regs, ufec_addr, ufec);

		for (j = 0; j < 64; j++) {
			/* wherever there is a primary 1, read the secondary */
			if (!all && (!(ufir & (1ull << j))))
				continue;

			sfir_addr = (i << 24) | (0x100 + 8 * j);
			sfir = __genwqe_readq(cd, sfir_addr);
			set_reg(cd, regs, &idx, max_regs, sfir_addr, sfir);

			sfec_addr = (i << 24) | (0x300 + 8 * j);
			sfec = __genwqe_readq(cd, sfec_addr);
			set_reg(cd, regs, &idx, max_regs, sfec_addr, sfec);
		}
	}

	/* fill with invalid data until end */
	for (i = idx; i < max_regs; i++) {
		regs[i].addr = 0xffffffff;
		regs[i].val = 0xffffffffffffffffull;
	}
	return idx;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
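/*
 * Illustrative sketch only (not part of the driver): one way a caller
 * could take a small FFDC snapshot into a caller-provided array and
 * print the valid entries. The function name and the array size are
 * made up for the example.
 */
static int __maybe_unused genwqe_example_ffdc_snapshot(struct genwqe_dev *cd)
{
	struct genwqe_reg snapshot[32];		/* arbitrary example size */
	int i, valid;

	/* all=0: only secondary regs flagged in the primary FIRs */
	valid = genwqe_read_ffdc_regs(cd, snapshot, ARRAY_SIZE(snapshot), 0);

	for (i = 0; i < valid && i < (int)ARRAY_SIZE(snapshot); i++)
		dev_info(&cd->pci_dev->dev, "  %08x: %016llx\n",
			 snapshot[i].addr, snapshot[i].val);

	return valid;
}
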
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * genwqe_ffdc_buff_size() - Calculates the number of dump registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * @cd: genwqe device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * @uid: unit ID
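 *
 * Return: number of struct genwqe_reg entries needed for the FFDC dump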
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) int entries = 0, ring, traps, traces, trace_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) u32 eevptr_addr, l_addr, d_len, d_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) u64 eevptr, val, addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) eevptr = __genwqe_readq(cd, eevptr_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if ((eevptr != 0x0) && (eevptr != -1ull)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) l_addr = GENWQE_UID_OFFS(uid) | eevptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) val = __genwqe_readq(cd, l_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if ((val == 0x0) || (val == -1ull))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) /* 38:24 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) d_len = (val & 0x0000007fff000000ull) >> 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /* 39 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) d_type = (val & 0x0000008000000000ull) >> 39;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (d_type) { /* repeat */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) entries += d_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) } else { /* size in bytes! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) entries += d_len >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) l_addr += 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) for (ring = 0; ring < 8; ring++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) val = __genwqe_readq(cd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if ((val == 0x0ull) || (val == -1ull))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) traps = (val >> 24) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) traces = (val >> 16) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) trace_entries = val & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) entries += traps + (traces * trace_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) return entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * genwqe_ffdc_buff_read() - Implements LogoutExtendedErrorRegisters procedure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * @cd: genwqe device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * @uid: unit ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * @regs: register information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * @max_regs: number of register entries
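 *
 * Return: 0 on success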
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) struct genwqe_reg *regs, unsigned int max_regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) int i, traps, traces, trace, trace_entries, trace_entry, ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) unsigned int idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) u32 eevptr_addr, l_addr, d_addr, d_len, d_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) u64 eevptr, e, val, addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) eevptr = __genwqe_readq(cd, eevptr_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if ((eevptr != 0x0) && (eevptr != 0xffffffffffffffffull)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) l_addr = GENWQE_UID_OFFS(uid) | eevptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) e = __genwqe_readq(cd, l_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if ((e == 0x0) || (e == 0xffffffffffffffffull))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) d_addr = (e & 0x0000000000ffffffull); /* 23:0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) d_len = (e & 0x0000007fff000000ull) >> 24; /* 38:24 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) d_type = (e & 0x0000008000000000ull) >> 39; /* 39 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) d_addr |= GENWQE_UID_OFFS(uid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (d_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) for (i = 0; i < (int)d_len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) val = __genwqe_readq(cd, d_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) set_reg_idx(cd, regs, &idx, max_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) d_addr, i, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) d_len >>= 3; /* Size in bytes! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) for (i = 0; i < (int)d_len; i++, d_addr += 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) val = __genwqe_readq(cd, d_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) set_reg_idx(cd, regs, &idx, max_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) d_addr, 0, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) l_addr += 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * To save time, only 6 traces are populated on Uid=2, Ring=1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * each with iters=512.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) /* Ring 0 is fls, ring 1 is fds, rings 2...7 are ASI rings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) for (ring = 0; ring < 8; ring++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) val = __genwqe_readq(cd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if ((val == 0x0ull) || (val == -1ull))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) traps = (val >> 24) & 0xff; /* Number of Traps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) traces = (val >> 16) & 0xff; /* Number of Traces */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) trace_entries = val & 0xffff; /* Entries per trace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) /* Note: This is a combined loop that dumps both the traps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) /* (for the trace == 0 case) as well as the traces 1 to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) /* 'traces'. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) for (trace = 0; trace <= traces; trace++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) u32 diag_sel =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) addr = (GENWQE_UID_OFFS(uid) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) IO_EXTENDED_DIAG_SELECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) __genwqe_writeq(cd, addr, diag_sel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) for (trace_entry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) trace_entry < (trace ? trace_entries : traps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) trace_entry++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) addr = (GENWQE_UID_OFFS(uid) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) IO_EXTENDED_DIAG_READ_MBX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) val = __genwqe_readq(cd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) set_reg_idx(cd, regs, &idx, max_regs, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) (diag_sel<<16) | trace_entry, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * genwqe_write_vreg() - Write register in virtual window
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * @cd: genwqe device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * @reg: register (byte) offset within BAR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * @val: value to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * @func: PCI virtual function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * Note: these registers are only accessible to the PF through the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * VF-window. They are not intended to be accessed by the VF itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) __genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) __genwqe_writeq(cd, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * genwqe_read_vreg() - Read register in virtual window
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * @cd: genwqe device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) * @reg: register (byte) offset within BAR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * @func: PCI virtual function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * Note: these registers are only accessible to the PF through the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * VF-window. They are not intended to be accessed by the VF itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) __genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) return __genwqe_readq(cd, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
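/*
 * Illustrative sketch only (not part of the driver): a PF-side helper
 * that reads a per-function register through the virtual window and
 * writes back a modified value. 'vreg_offs' is just a placeholder for
 * a real per-VF register offset; serializing access to the window
 * against other users is left to the caller in this sketch.
 */
static u64 __maybe_unused genwqe_example_vreg_set_bits(struct genwqe_dev *cd,
						       u32 vreg_offs, int vfunc,
						       u64 set_bits)
{
	u64 val;

	val = genwqe_read_vreg(cd, vreg_offs, vfunc);	/* selects window + reads */
	val |= set_bits;
	genwqe_write_vreg(cd, vreg_offs, val, vfunc);	/* selects window + writes */

	return val;
}
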
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * genwqe_base_clock_frequency() - Determine base clock frequency of the card
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * @cd: genwqe device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * Note: From a design perspective it turned out to be a bad idea to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) * use codes here to specify the frequency/speed values. An old
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) * driver cannot interpret new codes and is therefore always a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * problem. It is better to measure the value, or to put the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * speed/frequency directly into a register, which stays valid for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * old as well as for new software.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) * Return: Card clock in MHz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) int genwqe_base_clock_frequency(struct genwqe_dev *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) u16 speed;	/* speed-grade code from SLU unitcfg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) static const int speed_grade[] = { 250, 200, 166, 175 }; /* MHz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (speed >= ARRAY_SIZE(speed_grade))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) return 0; /* illegal value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) return speed_grade[speed];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * genwqe_stop_traps() - Stop traps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * @cd: genwqe device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * Before reading out the analysis data, we need to stop the traps.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) void genwqe_stop_traps(struct genwqe_dev *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) __genwqe_writeq(cd, IO_SLC_MISC_DEBUG_SET, 0xcull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * genwqe_start_traps() - Start traps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * @cd: genwqe device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * After the data has been read, the traps must be enabled again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) void genwqe_start_traps(struct genwqe_dev *cd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) __genwqe_writeq(cd, IO_SLC_MISC_DEBUG_CLR, 0xcull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if (genwqe_need_err_masking(cd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) __genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
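
/*
 * Illustrative sketch only (not part of the driver): the extended
 * error helpers above are meant to be used as a sequence - stop the
 * traps, determine the buffer size, read the unit's FFDC data, then
 * re-enable the traps. This example allocates a temporary buffer with
 * kcalloc() (from <linux/slab.h>); the driver itself may manage its
 * FFDC buffers differently.
 */
static int __maybe_unused genwqe_example_unit_dump(struct genwqe_dev *cd,
						   int uid)
{
	struct genwqe_reg *regs;
	int entries, rc;

	entries = genwqe_ffdc_buff_size(cd, uid);
	if (entries <= 0)
		return 0;			/* nothing to dump */

	regs = kcalloc(entries, sizeof(*regs), GFP_KERNEL);
	if (regs == NULL)
		return -ENOMEM;

	genwqe_stop_traps(cd);			/* freeze the analysis data */
	rc = genwqe_ffdc_buff_read(cd, uid, regs, entries);
	genwqe_start_traps(cd);			/* re-arm the traps */

	/* ... hand 'regs' to whatever consumes the dump ... */

	kfree(regs);
	return rc;
}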