/*
 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_stats.h"
#include "vnic_wq.h"

#define VNIC_DVCMD_TMO		10000	/* Devcmd Timeout value */
#define VNIC_NOTIFY_INTR_MASK	0x0000ffff00000000ULL

struct devcmd2_controller {
        struct vnic_wq_ctrl __iomem *wq_ctrl;
        struct vnic_dev_ring results_ring;
        struct vnic_wq wq;
        struct vnic_devcmd2 *cmd_ring;
        struct devcmd2_result *result;
        u16 next_result;
        u16 result_size;
        int color;
};

struct vnic_res {
        void __iomem *vaddr;
        unsigned int count;
};

struct vnic_dev {
        void *priv;
        struct pci_dev *pdev;
        struct vnic_res res[RES_TYPE_MAX];
        enum vnic_dev_intr_mode intr_mode;
        struct vnic_devcmd __iomem *devcmd;
        struct vnic_devcmd_notify *notify;
        struct vnic_devcmd_notify notify_copy;
        dma_addr_t notify_pa;
        u32 *linkstatus;
        dma_addr_t linkstatus_pa;
        struct vnic_stats *stats;
        dma_addr_t stats_pa;
        struct vnic_devcmd_fw_info *fw_info;
        dma_addr_t fw_info_pa;
        u64 args[VNIC_DEVCMD_NARGS];
        struct devcmd2_controller *devcmd2;

        int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
                          int wait);
};

#define VNIC_MAX_RES_HDR_SIZE \
        (sizeof(struct vnic_resource_header) + \
        sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128

void *svnic_dev_priv(struct vnic_dev *vdev)
{
        return vdev->priv;
}

static int vnic_dev_discover_res(struct vnic_dev *vdev,
        struct vnic_dev_bar *bar, unsigned int num_bars)
{
        struct vnic_resource_header __iomem *rh;
        struct vnic_resource __iomem *r;
        u8 type;

        if (num_bars == 0)
                return -EINVAL;

        if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
                pr_err("vNIC BAR0 res hdr length error\n");

                return -EINVAL;
        }

        rh = bar->vaddr;
        if (!rh) {
                pr_err("vNIC BAR0 res hdr not mem-mapped\n");

                return -EINVAL;
        }

        if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
            ioread32(&rh->version) != VNIC_RES_VERSION) {
                pr_err("vNIC BAR0 res magic/version error exp (%lx/%lx) curr (%x/%x)\n",
                        VNIC_RES_MAGIC, VNIC_RES_VERSION,
                        ioread32(&rh->magic), ioread32(&rh->version));

                return -EINVAL;
        }

        r = (struct vnic_resource __iomem *)(rh + 1);

        while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

                u8 bar_num = ioread8(&r->bar);
                u32 bar_offset = ioread32(&r->bar_offset);
                u32 count = ioread32(&r->count);
                u32 len;

                r++;

                if (bar_num >= num_bars)
                        continue;

                if (!bar[bar_num].len || !bar[bar_num].vaddr)
                        continue;

                switch (type) {
                case RES_TYPE_WQ:
                case RES_TYPE_RQ:
                case RES_TYPE_CQ:
                case RES_TYPE_INTR_CTRL:
                        /* each count is stride bytes long */
                        len = count * VNIC_RES_STRIDE;
                        if (len + bar_offset > bar->len) {
                                pr_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
                                        type, bar_offset, len, bar->len);

                                return -EINVAL;
                        }
                        break;

                case RES_TYPE_INTR_PBA_LEGACY:
                case RES_TYPE_DEVCMD:
                case RES_TYPE_DEVCMD2:
                        len = count;
                        break;

                default:
                        continue;
                }

                vdev->res[type].count = count;
                vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
        }

        return 0;
}

unsigned int svnic_dev_get_res_count(struct vnic_dev *vdev,
                                     enum vnic_res_type type)
{
        return vdev->res[type].count;
}

void __iomem *svnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
                                unsigned int index)
{
        if (!vdev->res[type].vaddr)
                return NULL;

        switch (type) {
        case RES_TYPE_WQ:
        case RES_TYPE_RQ:
        case RES_TYPE_CQ:
        case RES_TYPE_INTR_CTRL:
                return (char __iomem *)vdev->res[type].vaddr +
                        index * VNIC_RES_STRIDE;

        default:
                return (char __iomem *)vdev->res[type].vaddr;
        }
}
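
/*
 * Usage sketch (editorial, not part of the driver): after resource
 * discovery, per-queue register blocks are looked up by type and index,
 * and counts come from svnic_dev_get_res_count(). Queue-type resources
 * are laid out at a fixed VNIC_RES_STRIDE, which svnic_dev_get_res()
 * accounts for.
 */
#if 0	/* illustrative only, not compiled */
static void example_walk_wq_regs(struct vnic_dev *vdev)
{
        unsigned int i, n = svnic_dev_get_res_count(vdev, RES_TYPE_WQ);

        for (i = 0; i < n; i++) {
                void __iomem *wq_regs = svnic_dev_get_res(vdev, RES_TYPE_WQ, i);

                /* ... program the i-th work queue through wq_regs ... */
        }
}
#endif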

unsigned int svnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
                                      unsigned int desc_count,
                                      unsigned int desc_size)
{
        /* The base address of the desc rings must be 512 byte aligned.
         * Descriptor count is aligned to groups of 32 descriptors. A
         * count of 0 means the maximum 4096 descriptors. Descriptor
         * size is aligned to 16 bytes.
         */

        unsigned int count_align = 32;
        unsigned int desc_align = 16;

        ring->base_align = 512;

        if (desc_count == 0)
                desc_count = 4096;

        ring->desc_count = ALIGN(desc_count, count_align);

        ring->desc_size = ALIGN(desc_size, desc_align);

        ring->size = ring->desc_count * ring->desc_size;
        ring->size_unaligned = ring->size + ring->base_align;

        return ring->size_unaligned;
}
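
/*
 * Worked example (editorial sketch, not part of the driver): for a
 * hypothetical request of 100 descriptors of 24 bytes each, the helper
 * above rounds the count up to 128 (a multiple of 32) and the size up to
 * 32 bytes (a multiple of 16), so ring->size = 128 * 32 = 4096 bytes and
 * size_unaligned = 4096 + 512 = 4608 bytes; the extra 512 bytes leave
 * room to align the base address.
 */
#if 0	/* illustrative only, not compiled */
static void example_ring_sizing(void)
{
        struct vnic_dev_ring ring = { 0 };
        unsigned int bytes;

        bytes = svnic_dev_desc_ring_size(&ring, 100, 24);
        /* bytes == 4608, ring.desc_count == 128, ring.desc_size == 32 */
}
#endif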

void svnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
        memset(ring->descs, 0, ring->size);
}

int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
                              unsigned int desc_count, unsigned int desc_size)
{
        svnic_dev_desc_ring_size(ring, desc_count, desc_size);

        ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
                ring->size_unaligned, &ring->base_addr_unaligned,
                GFP_KERNEL);
        if (!ring->descs_unaligned) {
                pr_err("Failed to allocate ring (size=%d), aborting\n",
                        (int)ring->size);

                return -ENOMEM;
        }

        ring->base_addr = ALIGN(ring->base_addr_unaligned,
                ring->base_align);
        ring->descs = (u8 *)ring->descs_unaligned +
                (ring->base_addr - ring->base_addr_unaligned);

        svnic_dev_clear_desc_ring(ring);

        ring->desc_avail = ring->desc_count - 1;

        return 0;
}

void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
        if (ring->descs) {
                dma_free_coherent(&vdev->pdev->dev,
                                  ring->size_unaligned,
                                  ring->descs_unaligned,
                                  ring->base_addr_unaligned);
                ring->descs = NULL;
        }
}
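
/*
 * Minimal lifetime sketch (editorial, assumes an already-registered vdev):
 * allocate a coherent descriptor ring, and free it with the matching
 * helper once the hardware no longer references it. The over-allocation
 * and base-address alignment are handled inside svnic_dev_alloc_desc_ring().
 */
#if 0	/* illustrative only, not compiled */
static int example_ring_lifetime(struct vnic_dev *vdev)
{
        struct vnic_dev_ring ring = { 0 };
        int err;

        err = svnic_dev_alloc_desc_ring(vdev, &ring, 64, 64);
        if (err)
                return err;

        /* ... hand ring.base_addr / ring.descs to the hardware ... */

        svnic_dev_free_desc_ring(vdev, &ring);

        return 0;
}
#endif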

static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
                           int wait)
{
        struct devcmd2_controller *dc2c = vdev->devcmd2;
        struct devcmd2_result *result = NULL;
        unsigned int i;
        int delay;
        int err;
        u32 posted;
        u32 fetch_idx;
        u32 new_posted;
        u8 color;

        fetch_idx = ioread32(&dc2c->wq_ctrl->fetch_index);
        if (fetch_idx == 0xFFFFFFFF) { /* check for hardware gone */
                /* Hardware surprise removal: return error */
                return -ENODEV;
        }

        posted = ioread32(&dc2c->wq_ctrl->posted_index);

        if (posted == 0xFFFFFFFF) { /* check for hardware gone */
                /* Hardware surprise removal: return error */
                return -ENODEV;
        }

        new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
        if (new_posted == fetch_idx) {
                pr_err("%s: wq is full while issuing devcmd2 command %d, fetch index: %u, posted index: %u\n",
                        pci_name(vdev->pdev), _CMD_N(cmd), fetch_idx, posted);

                return -EBUSY;
        }

        dc2c->cmd_ring[posted].cmd = cmd;
        dc2c->cmd_ring[posted].flags = 0;

        if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
                dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;

        if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
                for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
                        dc2c->cmd_ring[posted].args[i] = vdev->args[i];
        }
        /* The write memory barrier prevents compiler and/or CPU
         * reordering, so the descriptor is fully initialized before it
         * is posted. Otherwise, hardware could read stale descriptor
         * fields.
         */
        wmb();
        iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);

        if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
                return 0;

        result = dc2c->result + dc2c->next_result;
        color = dc2c->color;

        /*
         * After posting the devcmd, advance next_result exactly once,
         * regardless of how the command eventually completes.
         */
        dc2c->next_result++;
        if (dc2c->next_result == dc2c->result_size) {
                dc2c->next_result = 0;
                dc2c->color = dc2c->color ? 0 : 1;
        }

        for (delay = 0; delay < wait; delay++) {
                udelay(100);
                if (result->color == color) {
                        if (result->error) {
                                err = (int) result->error;
                                if (err != ERR_ECMDUNKNOWN ||
                                    cmd != CMD_CAPABILITY)
                                        pr_err("Error %d devcmd %d\n",
                                                err, _CMD_N(cmd));

                                return err;
                        }
                        if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
                                for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
                                        vdev->args[i] = result->results[i];
                        }

                        return 0;
                }
        }

        pr_err("Timed out devcmd %d\n", _CMD_N(cmd));

        return -ETIMEDOUT;
}
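
/*
 * Editorial note on the completion protocol above: the driver polls the
 * next result slot until its color bit matches the color it expects.
 * Each time next_result wraps past result_size the expected color flips,
 * so a slot left over from the previous lap (still carrying the old
 * color) is never mistaken for a fresh completion. For example, with the
 * initial color of 1, the first result_size completions are recognized
 * by color == 1, the next lap by color == 0, and so on.
 */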

static int svnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
        struct devcmd2_controller *dc2c = NULL;
        unsigned int fetch_idx;
        int ret;
        void __iomem *p;

        if (vdev->devcmd2)
                return 0;

        p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
        if (!p)
                return -ENODEV;

        dc2c = kzalloc(sizeof(*dc2c), GFP_ATOMIC);
        if (!dc2c)
                return -ENOMEM;

        vdev->devcmd2 = dc2c;

        dc2c->color = 1;
        dc2c->result_size = DEVCMD2_RING_SIZE;

        ret = vnic_wq_devcmd2_alloc(vdev, &dc2c->wq, DEVCMD2_RING_SIZE,
                                    DEVCMD2_DESC_SIZE);
        if (ret)
                goto err_free_devcmd2;

        fetch_idx = ioread32(&dc2c->wq.ctrl->fetch_index);
        if (fetch_idx == 0xFFFFFFFF) { /* check for hardware gone */
                /* Hardware surprise removal: reset fetch_index */
                fetch_idx = 0;
        }

        /*
         * Never modify fetch_index; when setting up the WQ for devcmd2,
         * initialize posted_index to the same value as fetch_index.
         */
        vnic_wq_init_start(&dc2c->wq, 0, fetch_idx, fetch_idx, 0, 0);
        svnic_wq_enable(&dc2c->wq);
        ret = svnic_dev_alloc_desc_ring(vdev, &dc2c->results_ring,
                                        DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
        if (ret)
                goto err_free_wq;

        dc2c->result = (struct devcmd2_result *) dc2c->results_ring.descs;
        dc2c->cmd_ring = (struct vnic_devcmd2 *) dc2c->wq.ring.descs;
        dc2c->wq_ctrl = dc2c->wq.ctrl;
        vdev->args[0] = (u64) dc2c->results_ring.base_addr | VNIC_PADDR_TARGET;
        vdev->args[1] = DEVCMD2_RING_SIZE;

        ret = _svnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, VNIC_DVCMD_TMO);
        if (ret < 0)
                goto err_free_desc_ring;

        vdev->devcmd_rtn = &_svnic_dev_cmd2;
        pr_info("DEVCMD2 Initialized.\n");

        return ret;

err_free_desc_ring:
        svnic_dev_free_desc_ring(vdev, &dc2c->results_ring);

err_free_wq:
        svnic_wq_disable(&dc2c->wq);
        svnic_wq_free(&dc2c->wq);

err_free_devcmd2:
        kfree(dc2c);
        vdev->devcmd2 = NULL;

        return ret;
} /* end of svnic_dev_init_devcmd2 */

static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
        struct devcmd2_controller *dc2c = vdev->devcmd2;

        vdev->devcmd2 = NULL;
        vdev->devcmd_rtn = NULL;

        svnic_dev_free_desc_ring(vdev, &dc2c->results_ring);
        svnic_wq_disable(&dc2c->wq);
        svnic_wq_free(&dc2c->wq);
        kfree(dc2c);
}

int svnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
        u64 *a0, u64 *a1, int wait)
{
        int err;

        memset(vdev->args, 0, sizeof(vdev->args));
        vdev->args[0] = *a0;
        vdev->args[1] = *a1;

        err = (*vdev->devcmd_rtn)(vdev, cmd, wait);

        *a0 = vdev->args[0];
        *a1 = vdev->args[1];

        return err;
}
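
/*
 * The wrappers below all follow the same calling convention: a0/a1 carry
 * the command arguments in, and return the first two result registers on
 * the way out. A standalone sketch of that pattern (editorial, mirroring
 * svnic_dev_open_done() further down):
 */
#if 0	/* illustrative only, not compiled */
static int example_open_status(struct vnic_dev *vdev, int *done)
{
        u64 a0 = 0, a1 = 0;
        int err;

        err = svnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, VNIC_DVCMD_TMO);
        if (err)
                return err;

        *done = (a0 == 0);	/* a0 returns the open status */

        return 0;
}
#endif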

int svnic_dev_fw_info(struct vnic_dev *vdev,
                      struct vnic_devcmd_fw_info **fw_info)
{
        u64 a0, a1 = 0;
        int wait = VNIC_DVCMD_TMO;
        int err = 0;

        if (!vdev->fw_info) {
                vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
                        sizeof(struct vnic_devcmd_fw_info),
                        &vdev->fw_info_pa, GFP_KERNEL);
                if (!vdev->fw_info)
                        return -ENOMEM;

                a0 = vdev->fw_info_pa;

                /* only get fw_info once and cache it */
                err = svnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
        }

        *fw_info = vdev->fw_info;

        return err;
}

int svnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
                   unsigned int size, void *value)
{
        u64 a0, a1;
        int wait = VNIC_DVCMD_TMO;
        int err;

        a0 = offset;
        a1 = size;

        err = svnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

        switch (size) {
        case 1:
                *(u8 *)value = (u8)a0;
                break;
        case 2:
                *(u16 *)value = (u16)a0;
                break;
        case 4:
                *(u32 *)value = (u32)a0;
                break;
        case 8:
                *(u64 *)value = a0;
                break;
        default:
                BUG();
                break;
        }

        return err;
}
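
/*
 * Usage sketch for svnic_dev_spec() (editorial; the offset below is a
 * made-up placeholder, real callers take offsets from the device-spec
 * layout in the vNIC headers): read a 4-byte field out of the device
 * configuration space via CMD_DEV_SPEC.
 */
#if 0	/* illustrative only, not compiled */
static int example_read_spec_field(struct vnic_dev *vdev, u32 *out)
{
        unsigned int hypothetical_offset = 0x10;	/* placeholder offset */

        return svnic_dev_spec(vdev, hypothetical_offset, sizeof(*out), out);
}
#endif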

int svnic_dev_stats_clear(struct vnic_dev *vdev)
{
        u64 a0 = 0, a1 = 0;
        int wait = VNIC_DVCMD_TMO;

        return svnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}

int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
        u64 a0, a1;
        int wait = VNIC_DVCMD_TMO;

        if (!vdev->stats) {
                vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
                        sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
                if (!vdev->stats)
                        return -ENOMEM;
        }

        *stats = vdev->stats;
        a0 = vdev->stats_pa;
        a1 = sizeof(struct vnic_stats);

        return svnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}

int svnic_dev_close(struct vnic_dev *vdev)
{
        u64 a0 = 0, a1 = 0;
        int wait = VNIC_DVCMD_TMO;

        return svnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int svnic_dev_enable_wait(struct vnic_dev *vdev)
{
        u64 a0 = 0, a1 = 0;
        int wait = VNIC_DVCMD_TMO;
        int err = 0;

        err = svnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
        if (err == ERR_ECMDUNKNOWN)
                return svnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);

        return err;
}

int svnic_dev_disable(struct vnic_dev *vdev)
{
        u64 a0 = 0, a1 = 0;
        int wait = VNIC_DVCMD_TMO;

        return svnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int svnic_dev_open(struct vnic_dev *vdev, int arg)
{
        u64 a0 = (u32)arg, a1 = 0;
        int wait = VNIC_DVCMD_TMO;

        return svnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int svnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
        u64 a0 = 0, a1 = 0;
        int wait = VNIC_DVCMD_TMO;
        int err;

        *done = 0;

        err = svnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
        if (err)
                return err;

        *done = (a0 == 0);

        return 0;
}

int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
        u64 a0, a1;
        int wait = VNIC_DVCMD_TMO;

        if (!vdev->notify) {
                vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
                        sizeof(struct vnic_devcmd_notify),
                        &vdev->notify_pa, GFP_KERNEL);
                if (!vdev->notify)
                        return -ENOMEM;
        }

        a0 = vdev->notify_pa;
        a1 = ((u64)intr << 32) & VNIC_NOTIFY_INTR_MASK;
        a1 += sizeof(struct vnic_devcmd_notify);

        return svnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}
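
/*
 * Editorial note on the CMD_NOTIFY argument packing above: a1 carries
 * the interrupt index in bits 47:32 (masked by VNIC_NOTIFY_INTR_MASK)
 * and the notify buffer size in the low bits. For example, intr = 5
 * yields a1 = 0x0000000500000000 + sizeof(struct vnic_devcmd_notify);
 * svnic_dev_notify_unset() below passes the full mask (intr field of
 * 0xffff, i.e. -1) to unregister the interrupt, with a0 = 0 to unset
 * the buffer.
 */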

void svnic_dev_notify_unset(struct vnic_dev *vdev)
{
        u64 a0, a1;
        int wait = VNIC_DVCMD_TMO;

        a0 = 0; /* paddr = 0 to unset notify buffer */
        a1 = VNIC_NOTIFY_INTR_MASK; /* intr num = -1 to unreg for intr */
        a1 += sizeof(struct vnic_devcmd_notify);

        svnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}

static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
        u32 *words;
        unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
        unsigned int i;
        u32 csum;

        if (!vdev->notify)
                return 0;

        do {
                csum = 0;
                memcpy(&vdev->notify_copy, vdev->notify,
                       sizeof(struct vnic_devcmd_notify));
                words = (u32 *)&vdev->notify_copy;
                for (i = 1; i < nwords; i++)
                        csum += words[i];
        } while (csum != words[0]);

        return 1;
}

int svnic_dev_init(struct vnic_dev *vdev, int arg)
{
        u64 a0 = (u32)arg, a1 = 0;
        int wait = VNIC_DVCMD_TMO;

        return svnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
}

int svnic_dev_link_status(struct vnic_dev *vdev)
{
        if (vdev->linkstatus)
                return *vdev->linkstatus;

        if (!vnic_dev_notify_ready(vdev))
                return 0;

        return vdev->notify_copy.link_state;
}

u32 svnic_dev_link_down_cnt(struct vnic_dev *vdev)
{
        if (!vnic_dev_notify_ready(vdev))
                return 0;

        return vdev->notify_copy.link_down_cnt;
}

void svnic_dev_set_intr_mode(struct vnic_dev *vdev,
                             enum vnic_dev_intr_mode intr_mode)
{
        vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode svnic_dev_get_intr_mode(struct vnic_dev *vdev)
{
        return vdev->intr_mode;
}

void svnic_dev_unregister(struct vnic_dev *vdev)
{
        if (vdev) {
                if (vdev->notify)
                        dma_free_coherent(&vdev->pdev->dev,
                                sizeof(struct vnic_devcmd_notify),
                                vdev->notify, vdev->notify_pa);
                if (vdev->linkstatus)
                        dma_free_coherent(&vdev->pdev->dev,
                                sizeof(u32),
                                vdev->linkstatus, vdev->linkstatus_pa);
                if (vdev->stats)
                        dma_free_coherent(&vdev->pdev->dev,
                                sizeof(struct vnic_stats),
                                vdev->stats, vdev->stats_pa);
                if (vdev->fw_info)
                        dma_free_coherent(&vdev->pdev->dev,
                                sizeof(struct vnic_devcmd_fw_info),
                                vdev->fw_info, vdev->fw_info_pa);
                if (vdev->devcmd2)
                        vnic_dev_deinit_devcmd2(vdev);
                kfree(vdev);
        }
}

struct vnic_dev *svnic_dev_alloc_discover(struct vnic_dev *vdev,
                                          void *priv,
                                          struct pci_dev *pdev,
                                          struct vnic_dev_bar *bar,
                                          unsigned int num_bars)
{
        if (!vdev) {
                vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
                if (!vdev)
                        return NULL;
        }

        vdev->priv = priv;
        vdev->pdev = pdev;

        if (vnic_dev_discover_res(vdev, bar, num_bars))
                goto err_out;

        return vdev;

err_out:
        svnic_dev_unregister(vdev);

        return NULL;
} /* end of svnic_dev_alloc_discover */

/*
 * The fallback parameter is retained only to keep this interface common
 * with the other vNIC drivers.
 */
int svnic_dev_cmd_init(struct vnic_dev *vdev, int fallback)
{
        int err = -ENODEV;
        void __iomem *p;

        p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
        if (p)
                err = svnic_dev_init_devcmd2(vdev);
        else
                pr_err("DEVCMD2 resource not found.\n");

        return err;
} /* end of svnic_dev_cmd_init */
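
/*
 * End-to-end usage sketch (editorial, error handling and completion
 * polling trimmed): with an already ioremap'ed BAR0 descriptor, a caller
 * typically discovers resources, initializes the devcmd2 channel, then
 * opens and initializes the vNIC before registering the notify buffer.
 * This only illustrates how the exported helpers above are meant to be
 * combined; it is not part of the driver itself.
 */
#if 0	/* illustrative only, not compiled */
static struct vnic_dev *example_bringup(struct pci_dev *pdev,
                                        struct vnic_dev_bar *bar,
                                        void *priv, u16 notify_intr)
{
        struct vnic_dev *vdev;
        int done = 0;

        vdev = svnic_dev_alloc_discover(NULL, priv, pdev, bar, 1);
        if (!vdev)
                return NULL;

        if (svnic_dev_cmd_init(vdev, 0) ||		/* set up devcmd2 */
            svnic_dev_open(vdev, 0) ||			/* CMD_OPEN */
            svnic_dev_open_done(vdev, &done) || !done ||
            svnic_dev_init(vdev, 0) ||			/* CMD_INIT */
            svnic_dev_notify_set(vdev, notify_intr)) {	/* CMD_NOTIFY */
                svnic_dev_unregister(vdev);
                return NULL;
        }

        return vdev;
}
#endif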