// SPDX-License-Identifier: GPL-2.0-only
/*
 * Marvell UMI driver
 *
 * Copyright 2011 Marvell. <jyli@marvell.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/blkdev.h>
#include <linux/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_eh.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>

#include "mvumi.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("jyli@marvell.com");
MODULE_DESCRIPTION("Marvell UMI Driver");

static const struct pci_device_id mvumi_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, mvumi_pci_table);

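/*
 * Simple LIFO tag allocator: tag_init() pushes every tag from size - 1
 * down to 0, tag_get_one() pops a free tag and tag_release_one() pushes
 * one back.  The helpers do no locking of their own.
 */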
static void tag_init(struct mvumi_tag *st, unsigned short size)
{
	unsigned short i;

	BUG_ON(size != st->size);
	st->top = size;
	for (i = 0; i < size; i++)
		st->stack[i] = size - 1 - i;
}

static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
{
	BUG_ON(st->top <= 0);
	return st->stack[--st->top];
}

static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
			    unsigned short tag)
{
	BUG_ON(st->top >= st->size);
	st->stack[st->top++] = tag;
}

static bool tag_is_empty(struct mvumi_tag *st)
{
	return st->top == 0;
}

static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
{
	int i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++)
		if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
								addr_array[i])
			pci_iounmap(dev, addr_array[i]);
}

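/*
 * Map all memory BARs of the device.  Non-memory BARs get a NULL entry
 * so the array stays indexable by BAR number; on any mapping failure
 * the BARs mapped so far are unwound and -ENOMEM is returned.
 */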
static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
{
	int i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
			addr_array[i] = pci_iomap(dev, i, 0);
			if (!addr_array[i]) {
				dev_err(&dev->dev, "failed to map Bar[%d]\n",
									i);
				mvumi_unmap_pci_addr(dev, addr_array);
				return -ENOMEM;
			}
		} else
			addr_array[i] = NULL;

		dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
	}

	return 0;
}

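/*
 * Allocate a driver-tracked memory resource: cached memory comes from
 * kzalloc(), uncached memory from dma_alloc_coherent().  Every resource
 * is linked onto mhba->res_list so mvumi_release_mem_resource() can
 * free them all in one pass.
 */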
static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
				enum resource_type type, unsigned int size)
{
	struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);

	if (!res) {
		dev_err(&mhba->pdev->dev,
			"Failed to allocate memory for resource manager.\n");
		return NULL;
	}

	switch (type) {
	case RESOURCE_CACHED_MEMORY:
		res->virt_addr = kzalloc(size, GFP_ATOMIC);
		if (!res->virt_addr) {
			dev_err(&mhba->pdev->dev,
				"unable to allocate memory, size = %d.\n",
				size);
			kfree(res);
			return NULL;
		}
		break;

	case RESOURCE_UNCACHED_MEMORY:
		size = round_up(size, 8);
		res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size,
						    &res->bus_addr,
						    GFP_KERNEL);
		if (!res->virt_addr) {
			dev_err(&mhba->pdev->dev,
				"unable to allocate consistent mem, size = %d.\n",
				size);
			kfree(res);
			return NULL;
		}
		break;

	default:
		dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
		kfree(res);
		return NULL;
	}

	res->type = type;
	res->size = size;
	INIT_LIST_HEAD(&res->entry);
	list_add_tail(&res->entry, &mhba->res_list);

	return res;
}

static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
{
	struct mvumi_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
		switch (res->type) {
		case RESOURCE_UNCACHED_MEMORY:
			dma_free_coherent(&mhba->pdev->dev, res->size,
						res->virt_addr, res->bus_addr);
			break;
		case RESOURCE_CACHED_MEMORY:
			kfree(res->virt_addr);
			break;
		default:
			dev_err(&mhba->pdev->dev,
				"unknown resource type %d\n", res->type);
			break;
		}
		list_del(&res->entry);
		kfree(res);
	}
	mhba->fw_flag &= ~MVUMI_FW_ALLOC;
}

/**
 * mvumi_make_sgl - Prepares SGL
 * @mhba: Adapter soft state
 * @scmd: SCSI command from the mid-layer
 * @sgl_p: SGL to be filled in
 * @sg_count: return the number of SG elements
 *
 * If successful, this function returns 0. Otherwise, it returns -1.
 */
static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
					void *sgl_p, unsigned char *sg_count)
{
	struct scatterlist *sg;
	struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
	unsigned int i;
	unsigned int sgnum = scsi_sg_count(scmd);
	dma_addr_t busaddr;

	*sg_count = dma_map_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
			       scmd->sc_data_direction);
	if (*sg_count > mhba->max_sge) {
		dev_err(&mhba->pdev->dev,
			"sg count[0x%x] is bigger than max sg[0x%x].\n",
			*sg_count, mhba->max_sge);
		dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
			     scmd->sc_data_direction);
		return -1;
	}
	scsi_for_each_sg(scmd, sg, *sg_count, i) {
		busaddr = sg_dma_address(sg);
		m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
		m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
		m_sg->flags = 0;
		sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(sg)));
		if ((i + 1) == *sg_count)
			m_sg->flags |= 1U << mhba->eot_flag;

		sgd_inc(mhba, m_sg);
	}

	return 0;
}

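/*
 * Attach a single-entry SGL to an internal command.  The data buffer is
 * DMA-coherent memory of the requested size, remembered in
 * cmd->data_buf so mvumi_delete_internal_cmd() can free it later.
 */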
static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
						unsigned int size)
{
	struct mvumi_sgl *m_sg;
	void *virt_addr;
	dma_addr_t phy_addr;

	if (size == 0)
		return 0;

	virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr,
				       GFP_KERNEL);
	if (!virt_addr)
		return -1;

	m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
	cmd->frame->sg_counts = 1;
	cmd->data_buf = virt_addr;

	m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
	m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
	m_sg->flags = 1U << mhba->eot_flag;
	sgd_setsz(mhba, m_sg, cpu_to_le32(size));

	return 0;
}

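/*
 * Create an internal (driver-generated) command.  The message frame is
 * DMA-coherent, and an optional data buffer of buf_size bytes is hooked
 * up through a one-entry SGL.
 */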
static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
				unsigned int buf_size)
{
	struct mvumi_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd) {
		dev_err(&mhba->pdev->dev, "failed to create an internal cmd\n");
		return NULL;
	}
	INIT_LIST_HEAD(&cmd->queue_pointer);

	cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size,
					&cmd->frame_phys, GFP_KERNEL);
	if (!cmd->frame) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for FW frame, size = %d.\n",
			mhba->ib_max_size);
		kfree(cmd);
		return NULL;
	}

	if (buf_size) {
		if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
			dev_err(&mhba->pdev->dev,
				"failed to allocate memory for internal frame\n");
			dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
					  cmd->frame, cmd->frame_phys);
			kfree(cmd);
			return NULL;
		}
	} else
		cmd->frame->sg_counts = 0;

	return cmd;
}

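/*
 * Free an internal command.  The data buffer's bus address is rebuilt
 * from the low/high halves stored in the SGL before it is handed back
 * to dma_free_coherent().
 */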
static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	struct mvumi_sgl *m_sg;
	unsigned int size;
	dma_addr_t phy_addr;

	if (cmd && cmd->frame) {
		if (cmd->frame->sg_counts) {
			m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
			sgd_getsz(mhba, m_sg, size);

			phy_addr = (dma_addr_t) m_sg->baseaddr_l |
				(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);

			dma_free_coherent(&mhba->pdev->dev, size,
						cmd->data_buf, phy_addr);
		}
		dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
						cmd->frame, cmd->frame_phys);
		kfree(cmd);
	}
}

/**
 * mvumi_get_cmd - Get a command from the free pool
 * @mhba: Adapter soft state
 *
 * Returns a free command from the pool
 */
static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd = NULL;

	if (likely(!list_empty(&mhba->cmd_pool))) {
		cmd = list_first_entry(&mhba->cmd_pool,
					struct mvumi_cmd, queue_pointer);
		list_del_init(&cmd->queue_pointer);
	} else
		dev_warn(&mhba->pdev->dev, "command pool is empty!\n");

	return cmd;
}

/**
 * mvumi_return_cmd - Return a cmd to free command pool
 * @mhba: Adapter soft state
 * @cmd: Command packet to be returned to free command pool
 */
static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	cmd->scmd = NULL;
	list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
}

/**
 * mvumi_free_cmds - Free all the cmds in the free cmd pool
 * @mhba: Adapter soft state
 */
static void mvumi_free_cmds(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd;

	while (!list_empty(&mhba->cmd_pool)) {
		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
							queue_pointer);
		list_del(&cmd->queue_pointer);
		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
			kfree(cmd->frame);
		kfree(cmd);
	}
}

/**
 * mvumi_alloc_cmds - Allocates the command packets
 * @mhba: Adapter soft state
 */
static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
{
	int i;
	struct mvumi_cmd *cmd;

	for (i = 0; i < mhba->max_io; i++) {
		cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
		if (!cmd)
			goto err_exit;

		INIT_LIST_HEAD(&cmd->queue_pointer);
		list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
			cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
			cmd->frame_phys = mhba->ib_frame_phys
						+ i * mhba->ib_max_size;
		} else
			cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
		if (!cmd->frame)
			goto err_exit;
	}
	return 0;

err_exit:
	dev_err(&mhba->pdev->dev,
			"failed to allocate memory for cmd[0x%x].\n", i);
	while (!list_empty(&mhba->cmd_pool)) {
		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
						queue_pointer);
		list_del(&cmd->queue_pointer);
		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
			kfree(cmd->frame);
		kfree(cmd);
	}
	return -ENOMEM;
}

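/*
 * Number of free inbound list slots on 9143 controllers: zero when the
 * read pointer says the list is full or when the firmware already has
 * max_io commands outstanding.
 */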
static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
{
	unsigned int ib_rp_reg;
	struct mvumi_hw_regs *regs = mhba->regs;

	ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);

	if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
			(mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
			((ib_rp_reg & regs->cl_pointer_toggle)
			 != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
		dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
		return 0;
	}
	if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
		dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
		return 0;
	} else {
		return mhba->max_io - atomic_read(&mhba->fw_outstanding);
	}
}

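/*
 * Number of free inbound list slots on 9580 controllers, read from the
 * inbound shadow register (0xffff means none); returns 0 early once
 * fw_outstanding approaches max_io.
 */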
static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
{
	unsigned int count;

	if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
		return 0;
	count = ioread32(mhba->ib_shadow);
	if (count == 0xffff)
		return 0;
	return count;
}

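/*
 * Advance the inbound pointer one slot, wrapping around list_num_io and
 * flipping the toggle bit on wrap, and return a pointer to the new
 * entry.  The entry stride depends on dynamic source mode.
 */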
static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
{
	unsigned int cur_ib_entry;

	cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
	cur_ib_entry++;
	if (cur_ib_entry >= mhba->list_num_io) {
		cur_ib_entry -= mhba->list_num_io;
		mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
	}
	mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
	mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
		*ib_entry = mhba->ib_list + cur_ib_entry *
				sizeof(struct mvumi_dyn_list_entry);
	} else {
		*ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
	}
	atomic_inc(&mhba->fw_outstanding);
}

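/*
 * Publish the updated inbound pointer.  The shadow register is set back
 * to 0xffff, the "no free slot" sentinel that
 * mvumi_check_ib_list_9580() tests for.
 */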
static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
{
	iowrite32(0xffff, mhba->ib_shadow);
	iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
}

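/*
 * Re-check an outbound frame that looked inconsistent on first read:
 * after a short delay the entry is read again and its tag must be in
 * range, map to a live command and, when request IDs are enabled,
 * match that command's request ID.
 */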
static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
		unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
{
	unsigned short tag, request_id;

	udelay(1);
	p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
	request_id = p_outb_frame->request_id;
	tag = p_outb_frame->tag;
	if (tag > mhba->tag_pool.size) {
		dev_err(&mhba->pdev->dev, "ob frame data error\n");
		return -1;
	}
	if (mhba->tag_cmd[tag] == NULL) {
		dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
		return -1;
	} else if (mhba->tag_cmd[tag]->request_id != request_id &&
						mhba->request_id_enabled) {
		dev_err(&mhba->pdev->dev,
			"request ID from FW:0x%x, cmd request ID:0x%x\n",
			request_id, mhba->tag_cmd[tag]->request_id);
		return -1;
	}

	return 0;
}

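/*
 * Compute the window [cur_obf, assign_obf_end) of outbound entries to
 * consume on 9143 controllers.  The copy pointer is re-read until it
 * agrees with the shadow register before the window is derived from it.
 */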
static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
			unsigned int *cur_obf, unsigned int *assign_obf_end)
{
	unsigned int ob_write, ob_write_shadow;
	struct mvumi_hw_regs *regs = mhba->regs;

	do {
		ob_write = ioread32(regs->outb_copy_pointer);
		ob_write_shadow = ioread32(mhba->ob_shadow);
	} while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);

	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;

	if ((ob_write & regs->cl_pointer_toggle) !=
			(mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
		*assign_obf_end += mhba->list_num_io;
	}
	return 0;
}

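/*
 * Compute the outbound window on 9580 controllers.  The first
 * ioread32() result (outb_read_pointer) is discarded; only the copy
 * pointer value is used.  Returns -1 when the list is empty.
 */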
static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
			unsigned int *cur_obf, unsigned int *assign_obf_end)
{
	unsigned int ob_write;
	struct mvumi_hw_regs *regs = mhba->regs;

	ob_write = ioread32(regs->outb_read_pointer);
	ob_write = ioread32(regs->outb_copy_pointer);
	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
	if (*assign_obf_end < *cur_obf)
		*assign_obf_end += mhba->list_num_io;
	else if (*assign_obf_end == *cur_obf)
		return -1;
	return 0;
}

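/*
 * Drain the outbound list: each completed response frame is copied into
 * a free mvumi_ob_data buffer and queued on free_ob_list.  If no buffer
 * is left, the pointer is backed up one slot so the entry is picked up
 * on the next run.
 */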
static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
{
	unsigned int cur_obf, assign_obf_end, i;
	struct mvumi_ob_data *ob_data;
	struct mvumi_rsp_frame *p_outb_frame;
	struct mvumi_hw_regs *regs = mhba->regs;

	if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
		return;

	for (i = (assign_obf_end - cur_obf); i != 0; i--) {
		cur_obf++;
		if (cur_obf >= mhba->list_num_io) {
			cur_obf -= mhba->list_num_io;
			mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
		}

		p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;

		/* Copy pointer may point to entry in outbound list
		 * before entry has valid data
		 */
		if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
				mhba->tag_cmd[p_outb_frame->tag] == NULL ||
				p_outb_frame->request_id !=
				mhba->tag_cmd[p_outb_frame->tag]->request_id))
			if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
				continue;

		if (!list_empty(&mhba->ob_data_list)) {
			ob_data = list_first_entry(&mhba->ob_data_list,
						struct mvumi_ob_data, list);
			list_del_init(&ob_data->list);
		} else {
			ob_data = NULL;
			if (cur_obf == 0) {
				cur_obf = mhba->list_num_io - 1;
				mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
			} else
				cur_obf -= 1;
			break;
		}

		memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
		p_outb_frame->tag = 0xff;

		list_add_tail(&ob_data->list, &mhba->free_ob_list);
	}
	mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
	mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
	iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
}

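/*
 * Request a firmware soft reset via the doorbell, but only when the
 * handshake state reports HANDSHAKE_DONESTATE.
 */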
static void mvumi_reset(struct mvumi_hba *mhba)
{
	struct mvumi_hw_regs *regs = mhba->regs;

	iowrite32(0, regs->enpointa_mask_reg);
	if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)
		return;

	iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);
}

static unsigned char mvumi_start(struct mvumi_hba *mhba);

static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
{
	mhba->fw_state = FW_STATE_ABORT;
	mvumi_reset(mhba);

	return mvumi_start(mhba) ? FAILED : SUCCESS;
}

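/*
 * Reset the message unit and poll arm_to_pciea_msg1 (for up to
 * FW_MAX_DELAY seconds) until the firmware reports the handshake-ready
 * state.
 */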
static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
{
	struct mvumi_hw_regs *regs = mhba->regs;
	unsigned long before = jiffies;
	u32 tmp;

	iowrite32(0, regs->enpointa_mask_reg);
	tmp = ioread32(regs->arm_to_pciea_msg1);
	while (tmp != HANDSHAKE_READYSTATE) {
		iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"FW reset failed [0x%x].\n", tmp);
			return FAILED;
		}

		msleep(500);
		rmb();
		tmp = ioread32(regs->arm_to_pciea_msg1);
	}

	return SUCCESS;
}

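/*
 * Save and restore the raw PCI BAR registers (config space offset 0x10
 * onwards) around the 9580 host reset.
 */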
static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
{
	unsigned char i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
						&mhba->pci_base[i]);
	}
}

static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
{
	unsigned char i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		if (mhba->pci_base[i])
			pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
						mhba->pci_base[i]);
	}
}

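/*
 * Enable bus mastering and set the DMA mask: 64-bit where available,
 * falling back to 32-bit otherwise.
 */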
static int mvumi_pci_set_master(struct pci_dev *pdev)
{
	int ret = 0;

	pci_set_master(pdev);

	if (IS_DMA64) {
		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
			ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	} else
		ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));

	return ret;
}

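/*
 * Full host reset for 9580 controllers: hit the reset registers,
 * re-enable the PCI device, restore the BARs, wait for firmware
 * readiness and then redo the handshake through
 * mvumi_wait_for_outstanding().
 */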
static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
{
	mhba->fw_state = FW_STATE_ABORT;

	iowrite32(0, mhba->regs->reset_enable);
	iowrite32(0xf, mhba->regs->reset_request);

	iowrite32(0x10, mhba->regs->reset_enable);
	iowrite32(0x10, mhba->regs->reset_request);
	msleep(100);
	pci_disable_device(mhba->pdev);

	if (pci_enable_device(mhba->pdev)) {
		dev_err(&mhba->pdev->dev, "enable device failed\n");
		return FAILED;
	}
	if (mvumi_pci_set_master(mhba->pdev)) {
		dev_err(&mhba->pdev->dev, "set master failed\n");
		return FAILED;
	}
	mvumi_restore_bar_addr(mhba);
	if (mvumi_wait_for_fw(mhba) == FAILED)
		return FAILED;

	return mvumi_wait_for_outstanding(mhba);
}

static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
{
	return mvumi_wait_for_outstanding(mhba);
}

static int mvumi_host_reset(struct scsi_cmnd *scmd)
{
	struct mvumi_hba *mhba;

	mhba = (struct mvumi_hba *) scmd->device->host->hostdata;

	scmd_printk(KERN_NOTICE, scmd, "RESET -%u cmd=%x retries=%x\n",
			scmd->request->tag, scmd->cmnd[0], scmd->retries);

	return mhba->instancet->reset_host(mhba);
}

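/*
 * Fire an internal command and sleep (up to
 * MVUMI_INTERNAL_CMD_WAIT_TIME seconds) until it completes.  If the
 * wait times out, the tag, queue linkage and outstanding count are
 * cleaned up here under the host lock.
 */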
static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	unsigned long flags;

	cmd->cmd_status = REQ_STATUS_PENDING;

	if (atomic_read(&cmd->sync_cmd)) {
		dev_err(&mhba->pdev->dev,
			"last blocked cmd not finished, sync_cmd = %d\n",
						atomic_read(&cmd->sync_cmd));
		BUG_ON(1);
		return -1;
	}
	atomic_inc(&cmd->sync_cmd);
	spin_lock_irqsave(mhba->shost->host_lock, flags);
	mhba->instancet->fire_cmd(mhba, cmd);
	spin_unlock_irqrestore(mhba->shost->host_lock, flags);

	wait_event_timeout(mhba->int_cmd_wait_q,
		(cmd->cmd_status != REQ_STATUS_PENDING),
		MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);

	/* command timeout */
	if (atomic_read(&cmd->sync_cmd)) {
		spin_lock_irqsave(mhba->shost->host_lock, flags);
		atomic_dec(&cmd->sync_cmd);
		if (mhba->tag_cmd[cmd->frame->tag]) {
			mhba->tag_cmd[cmd->frame->tag] = NULL;
			dev_warn(&mhba->pdev->dev,
				"TIMEOUT: release tag [%d]\n", cmd->frame->tag);
			tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
		}
		if (!list_empty(&cmd->queue_pointer)) {
			dev_warn(&mhba->pdev->dev,
				"TIMEOUT: an internal command was not sent!\n");
			list_del_init(&cmd->queue_pointer);
		} else
			atomic_dec(&mhba->fw_outstanding);

		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
	}
	return 0;
}

static void mvumi_release_fw(struct mvumi_hba *mhba)
{
	mvumi_free_cmds(mhba);
	mvumi_release_mem_resource(mhba);
	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
	dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
		mhba->handshake_page, mhba->handshake_page_phys);
	kfree(mhba->regs);
	pci_release_regions(mhba->pdev);
}

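/*
 * Issue a Marvell-specific CDB_CORE_SHUTDOWN command to every mapped
 * target so the firmware flushes cached data before shutdown or reset.
 */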
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) struct mvumi_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) struct mvumi_msg_frame *frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) unsigned char device_id, retry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) unsigned char bitcount = sizeof(unsigned char) * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) if (!(mhba->target_map[device_id / bitcount] &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) (1 << (device_id % bitcount))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) get_cmd: cmd = mvumi_create_internal_cmd(mhba, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (retry++ >= 5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) dev_err(&mhba->pdev->dev, "failed to get memory"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) " for internal flush cache cmd for "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) "device %d", device_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) retry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) goto get_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) cmd->scmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) cmd->cmd_status = REQ_STATUS_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) atomic_set(&cmd->sync_cmd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) frame = cmd->frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) frame->req_function = CL_FUN_SCSI_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) frame->device_id = device_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) frame->cmd_flag = CMD_FLAG_NON_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) frame->data_transfer_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) frame->cdb_length = MAX_COMMAND_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) memset(frame->cdb, 0, MAX_COMMAND_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) frame->cdb[1] = CDB_CORE_MODULE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) frame->cdb[2] = CDB_CORE_SHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) mvumi_issue_blocked_cmd(mhba, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) if (cmd->cmd_status != SAM_STAT_GOOD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) dev_err(&mhba->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) "device %d flush cache failed, status=0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) device_id, cmd->cmd_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) mvumi_delete_internal_cmd(mhba, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
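^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * mvumi_calculate_checksum - XOR checksum over a handshake page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * @p_header: Handshake page header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * @len: Number of content bytes to sum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * Returns the byte-wise XOR of the first @len bytes of the page content;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * e.g. for the two content bytes {0x12, 0x34} the checksum is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * 0x12 ^ 0x34 = 0x26. Outgoing pages are sealed with this value and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * received pages are verified against it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) */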
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) static unsigned char
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) unsigned short len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) unsigned char *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) unsigned char ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) unsigned short i;	/* must be able to count up to len */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) ptr = (unsigned char *) p_header->frame_content;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) for (i = 0; i < len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) ret ^= *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) ptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
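^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * mvumi_hs_build_page - Fill in one handshake page for the firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * @mhba: Adapter soft state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * @hs_header: Page buffer to build, selected by its page_code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * Builds the host-info, firmware-control or communication-list page and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * seals it with the XOR checksum over the page content.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) */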
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) static void mvumi_hs_build_page(struct mvumi_hba *mhba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) struct mvumi_hs_header *hs_header)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) struct mvumi_hs_page2 *hs_page2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) struct mvumi_hs_page4 *hs_page4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) struct mvumi_hs_page3 *hs_page3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) u64 time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) u64 local_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) switch (hs_header->page_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) case HS_PAGE_HOST_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) hs_page2 = (struct mvumi_hs_page2 *) hs_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) hs_header->frame_length = sizeof(*hs_page2) - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) memset(hs_header->frame_content, 0, hs_header->frame_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) hs_page2->host_type = 3; /* 3 means Linux */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) hs_page2->host_cap = 0x08; /* host dynamic source mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) hs_page2->host_ver.ver_major = VER_MAJOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) hs_page2->host_ver.ver_minor = VER_MINOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) hs_page2->host_ver.ver_oem = VER_OEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) hs_page2->host_ver.ver_build = VER_BUILD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) hs_page2->system_io_bus = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) hs_page2->slot_number = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) hs_page2->intr_level = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) hs_page2->intr_vector = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) time = ktime_get_real_seconds();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) local_time = (time - (sys_tz.tz_minuteswest * 60));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) hs_page2->seconds_since1970 = local_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) hs_header->checksum = mvumi_calculate_checksum(hs_header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) hs_header->frame_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) case HS_PAGE_FIRM_CTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) hs_page3 = (struct mvumi_hs_page3 *) hs_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) hs_header->frame_length = sizeof(*hs_page3) - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) memset(hs_header->frame_content, 0, hs_header->frame_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) hs_header->checksum = mvumi_calculate_checksum(hs_header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) hs_header->frame_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) case HS_PAGE_CL_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) hs_page4 = (struct mvumi_hs_page4 *) hs_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) hs_header->frame_length = sizeof(*hs_page4) - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) memset(hs_header->frame_content, 0, hs_header->frame_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) hs_page4->ib_entry_size = mhba->ib_max_size_setting;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) hs_page4->ob_entry_size = mhba->ob_max_size_setting;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (mhba->hba_capability
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) hs_page4->ob_depth = find_first_bit((unsigned long *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) &mhba->list_num_io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) BITS_PER_LONG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) hs_page4->ib_depth = find_first_bit((unsigned long *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) &mhba->list_num_io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) BITS_PER_LONG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) hs_page4->ob_depth = (u8) mhba->list_num_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) hs_page4->ib_depth = (u8) mhba->list_num_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) hs_header->checksum = mvumi_calculate_checksum(hs_header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) hs_header->frame_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) hs_header->page_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) * mvumi_init_data - Initialize the data requested by the FW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * @mhba: Adapter soft state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) static int mvumi_init_data(struct mvumi_hba *mhba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) struct mvumi_ob_data *ob_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) struct mvumi_res *res_mgnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) unsigned int tmp_size, offset, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) void *virmem, *v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) dma_addr_t p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (mhba->fw_flag & MVUMI_FW_ALLOC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) tmp_size = mhba->ib_max_size * mhba->max_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) tmp_size += 8 + sizeof(u32)*2 + 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) res_mgnt = mvumi_alloc_mem_resource(mhba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) RESOURCE_UNCACHED_MEMORY, tmp_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (!res_mgnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) dev_err(&mhba->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) "failed to allocate memory for inbound list\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) goto fail_alloc_dma_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) p = res_mgnt->bus_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) v = res_mgnt->virt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) /* ib_list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) offset = round_up(p, 128) - p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) p += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) v += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) mhba->ib_list = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) mhba->ib_list_phys = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) mhba->ib_frame = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) mhba->ib_frame_phys = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) v += mhba->ib_max_size * mhba->max_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) p += mhba->ib_max_size * mhba->max_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /* ib shadow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) offset = round_up(p, 8) - p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) p += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) v += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) mhba->ib_shadow = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) mhba->ib_shadow_phys = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) p += sizeof(u32)*2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) v += sizeof(u32)*2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) /* ob shadow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) offset = round_up(p, 8) - p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) p += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) v += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) mhba->ob_shadow = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) mhba->ob_shadow_phys = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) p += 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) v += 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) offset = round_up(p, 4) - p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) p += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) v += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) mhba->ob_shadow = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) mhba->ob_shadow_phys = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) p += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) v += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) /* ob list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) offset = round_up(p, 128) - p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) p += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) v += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) mhba->ob_list = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) mhba->ob_list_phys = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) /* ob data pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) tmp_size = round_up(tmp_size, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) res_mgnt = mvumi_alloc_mem_resource(mhba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) RESOURCE_CACHED_MEMORY, tmp_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (!res_mgnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) dev_err(&mhba->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) "failed to allocate memory for outbound data buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) goto fail_alloc_dma_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) virmem = res_mgnt->virt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) for (i = mhba->max_io; i != 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) ob_pool = (struct mvumi_ob_data *) virmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) list_add_tail(&ob_pool->list, &mhba->ob_data_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) virmem += mhba->ob_max_size + sizeof(*ob_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) tmp_size = sizeof(unsigned short) * mhba->max_io +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) sizeof(struct mvumi_cmd *) * mhba->max_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) (sizeof(unsigned char) * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) res_mgnt = mvumi_alloc_mem_resource(mhba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) RESOURCE_CACHED_MEMORY, tmp_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (!res_mgnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) dev_err(&mhba->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) "failed to allocate memory for tag and target map\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) goto fail_alloc_dma_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) virmem = res_mgnt->virt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) mhba->tag_pool.stack = virmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) mhba->tag_pool.size = mhba->max_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) tag_init(&mhba->tag_pool, mhba->max_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) virmem += sizeof(unsigned short) * mhba->max_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) mhba->tag_cmd = virmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) mhba->target_map = virmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) mhba->fw_flag |= MVUMI_FW_ALLOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) fail_alloc_dma_buf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) mvumi_release_mem_resource(mhba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
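^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * mvumi_hs_process_page - Consume a handshake page sent by the firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * @mhba: Adapter soft state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * @hs_header: Received page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * Verifies the page checksum and, for the firmware-capability page, caches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * the I/O limits (queue depth, entry sizes, max transfer size) that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * rest of the driver relies on. Returns 0 on success, -1 on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) */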
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) static int mvumi_hs_process_page(struct mvumi_hba *mhba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) struct mvumi_hs_header *hs_header)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) struct mvumi_hs_page1 *hs_page1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) unsigned char page_checksum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) page_checksum = mvumi_calculate_checksum(hs_header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) hs_header->frame_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (page_checksum != hs_header->checksum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) dev_err(&mhba->pdev->dev, "checksum error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) switch (hs_header->page_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) case HS_PAGE_FIRM_CAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) hs_page1 = (struct mvumi_hs_page1 *) hs_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) mhba->max_io = hs_page1->max_io_support;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) mhba->list_num_io = hs_page1->cl_inout_list_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) mhba->max_transfer_size = hs_page1->max_transfer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) mhba->max_target_id = hs_page1->max_devices_support;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) mhba->hba_capability = hs_page1->capability;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) hs_page1->fw_ver.ver_build);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) mhba->eot_flag = 22;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) mhba->eot_flag = 27;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) dev_err(&mhba->pdev->dev, "handshake: page code error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * mvumi_handshake - Move the FW to READY state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) * @mhba: Adapter soft state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) * During initialization, the FW can potentially be in any one of several
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) * possible states. If the FW is in the operational or waiting-for-handshake
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * state, the driver must take steps to bring it to the ready state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) * Otherwise, it has to wait until the FW reaches the ready state on its own.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) static int mvumi_handshake(struct mvumi_hba *mhba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) unsigned int hs_state, tmp, hs_fun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) struct mvumi_hs_header *hs_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) struct mvumi_hw_regs *regs = mhba->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (mhba->fw_state == FW_STATE_STARTING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) hs_state = HS_S_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) tmp = ioread32(regs->arm_to_pciea_msg0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) hs_state = HS_GET_STATE(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) mhba->fw_state = FW_STATE_STARTING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) hs_fun = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) switch (hs_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) case HS_S_START:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) mhba->fw_state = FW_STATE_HANDSHAKING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) HS_SET_STATUS(hs_fun, HS_STATUS_OK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) HS_SET_STATE(hs_fun, HS_S_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) iowrite32(hs_fun, regs->pciea_to_arm_msg0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) case HS_S_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) iowrite32(lower_32_bits(mhba->handshake_page_phys),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) regs->pciea_to_arm_msg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) iowrite32(upper_32_bits(mhba->handshake_page_phys),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) regs->arm_to_pciea_msg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) HS_SET_STATUS(hs_fun, HS_STATUS_OK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) iowrite32(hs_fun, regs->pciea_to_arm_msg0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) case HS_S_PAGE_ADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) case HS_S_QUERY_PAGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) case HS_S_SEND_PAGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) mhba->hba_total_pages =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) ((struct mvumi_hs_page1 *) hs_header)->total_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if (mhba->hba_total_pages == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) mhba->hba_total_pages = HS_PAGE_TOTAL-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) if (hs_state == HS_S_QUERY_PAGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (mvumi_hs_process_page(mhba, hs_header)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) HS_SET_STATE(hs_fun, HS_S_ABORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (mvumi_init_data(mhba)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) HS_SET_STATE(hs_fun, HS_S_ABORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) } else if (hs_state == HS_S_PAGE_ADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) hs_header->page_code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) mhba->hba_total_pages = HS_PAGE_TOTAL-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) hs_header->page_code++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) mvumi_hs_build_page(mhba, hs_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) HS_SET_STATE(hs_fun, HS_S_END);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) HS_SET_STATUS(hs_fun, HS_STATUS_OK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) iowrite32(hs_fun, regs->pciea_to_arm_msg0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) case HS_S_END:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) /* Set communication list ISR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) tmp = ioread32(regs->enpointa_mask_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) tmp |= regs->int_comaout | regs->int_comaerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) iowrite32(tmp, regs->enpointa_mask_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) iowrite32(mhba->list_num_io, mhba->ib_shadow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) /* Set InBound List Available count shadow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) iowrite32(lower_32_bits(mhba->ib_shadow_phys),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) regs->inb_aval_count_basel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) iowrite32(upper_32_bits(mhba->ib_shadow_phys),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) regs->inb_aval_count_baseh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) /* Set OutBound List Available count shadow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) iowrite32((mhba->list_num_io-1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) regs->cl_pointer_toggle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) mhba->ob_shadow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) iowrite32(lower_32_bits(mhba->ob_shadow_phys),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) regs->outb_copy_basel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) iowrite32(upper_32_bits(mhba->ob_shadow_phys),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) regs->outb_copy_baseh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) mhba->ib_cur_slot = (mhba->list_num_io - 1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) regs->cl_pointer_toggle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) mhba->ob_cur_slot = (mhba->list_num_io - 1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) regs->cl_pointer_toggle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) mhba->fw_state = FW_STATE_STARTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) hs_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
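^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * mvumi_handshake_event - Run one handshake step and wait for the response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * @mhba: Adapter soft state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * Kicks mvumi_handshake() and then polls the firmware status register until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * the handshake doorbell is raised, the FW reports STARTED, or FW_MAX_DELAY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * seconds elapse. Returns 0 on progress, nonzero on timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) */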
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) unsigned int isr_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) unsigned long before;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) before = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) mvumi_handshake(mhba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) isr_status = mhba->instancet->read_fw_status_reg(mhba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (mhba->fw_state == FW_STATE_STARTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) dev_err(&mhba->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) "no handshake response at state 0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) mhba->fw_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) dev_err(&mhba->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) "isr : global=0x%x,status=0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) mhba->global_isr, isr_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) usleep_range(1000, 2000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) } while (!(isr_status & DRBL_HANDSHAKE_ISR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
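^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) * mvumi_check_handshake - Drive the firmware to the STARTED state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) * @mhba: Adapter soft state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) * Waits for the ready/done signature in the message register, resetting the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) * message unit if needed, then runs the handshake state machine until the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) * firmware reports FW_STATE_STARTED. Returns 0 on success, nonzero on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) * failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) */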
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) unsigned int tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) unsigned long before;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) before = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (tmp != HANDSHAKE_READYSTATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) iowrite32(DRBL_MU_RESET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) mhba->regs->pciea_to_arm_drbl_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) dev_err(&mhba->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) "invalid signature [0x%x].\n", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) usleep_range(1000, 2000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) mhba->fw_state = FW_STATE_STARTING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (mvumi_handshake_event(mhba)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) dev_err(&mhba->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) "handshake failed at state 0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) mhba->fw_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) } while (mhba->fw_state != FW_STATE_STARTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
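^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) * mvumi_start - Prepare the controller and perform the firmware handshake
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) * @mhba: Adapter soft state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) * Clears any pending doorbell, unmasks the doorbell and communication-list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) * interrupts, then runs the handshake. Returns 0 on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) */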
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) static unsigned char mvumi_start(struct mvumi_hba *mhba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) unsigned int tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) struct mvumi_hw_regs *regs = mhba->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) /* clear Door bell */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) tmp = ioread32(regs->arm_to_pciea_drbl_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) iowrite32(tmp, regs->arm_to_pciea_drbl_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) iowrite32(tmp, regs->enpointa_mask_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) msleep(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (mvumi_check_handshake(mhba))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * mvumi_complete_cmd - Completes a command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) * @mhba: Adapter soft state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * @cmd: Command to be completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) struct mvumi_rsp_frame *ob_frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) struct scsi_cmnd *scmd = cmd->scmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) scmd->SCp.ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) scmd->result = ob_frame->req_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) switch (ob_frame->req_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) case SAM_STAT_GOOD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) scmd->result |= DID_OK << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) case SAM_STAT_BUSY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) scmd->result |= DID_BUS_BUSY << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) case SAM_STAT_CHECK_CONDITION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) scmd->result |= (DID_OK << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) sizeof(struct mvumi_sense_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) scmd->result |= (DRIVER_SENSE << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (scsi_bufflen(scmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) scsi_sg_count(scmd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) scmd->sc_data_direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) scmd->scsi_done(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) mvumi_return_cmd(mhba, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
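^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * mvumi_complete_internal_cmd - Complete a driver-internal command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * @mhba: Adapter soft state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * @cmd: Internal command that was waited on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * @ob_frame: Response frame from the firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * Copies the status (and sense data, if any) back to the issuer and wakes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * the waiter sleeping on int_cmd_wait_q in mvumi_issue_blocked_cmd().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) */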
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) struct mvumi_cmd *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) struct mvumi_rsp_frame *ob_frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) if (atomic_read(&cmd->sync_cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) cmd->cmd_status = ob_frame->req_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) cmd->data_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) memcpy(cmd->data_buf, ob_frame->payload,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) sizeof(struct mvumi_sense_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) atomic_dec(&cmd->sync_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) wake_up(&mhba->int_cmd_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
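^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * mvumi_show_event - Log a firmware event to the kernel log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * @mhba: Adapter soft state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * @ptr: Event descriptor reported by the firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * Prints the event id, severity and device id, followed by any event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * parameters and sense data the firmware attached.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) */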
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) static void mvumi_show_event(struct mvumi_hba *mhba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) struct mvumi_driver_event *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) dev_warn(&mhba->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) "Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) if (ptr->param_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) pr_warn("Event param(len 0x%x):", ptr->param_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) for (i = 0; i < ptr->param_count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) pr_cont(" 0x%x", ptr->params[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) if (ptr->sense_data_length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) pr_warn("Event sense data(len 0x%x):",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) ptr->sense_data_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) for (i = 0; i < ptr->sense_data_length; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) pr_cont(" 0x%x", ptr->sense_data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
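^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * mvumi_handle_hotplug - Attach or detach a SCSI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * @mhba: Adapter soft state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * @devid: Target id of the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * @status: DEVICE_ONLINE to add the device, DEVICE_OFFLINE to remove it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * Returns 0 on success and -1 if there was nothing to add or remove.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) */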
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) int ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) if (status == DEVICE_OFFLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (sdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) sdev->id, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) scsi_remove_device(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) scsi_device_put(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) dev_err(&mhba->pdev->dev, "no disk[%d] to remove\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) devid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) } else if (status == DEVICE_ONLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) if (!sdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) scsi_add_device(mhba->shost, 0, devid, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) dev_dbg(&mhba->pdev->dev, "add disk %d-%d-%d.\n", 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) devid, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) dev_err(&mhba->pdev->dev, "disk %d-%d-%d already exists, not adding.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 0, devid, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) scsi_device_put(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
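^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * mvumi_inquiry - Issue a standard INQUIRY to derive a device WWID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * @mhba: Adapter soft state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * @id: Target id to probe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * @cmd: Internal command to reuse, or NULL to allocate a temporary one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * On MV9143 the WWID is simply faked as id + 1; otherwise it is copied from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * the unique-id bytes of the INQUIRY response. Returns the WWID, or 0 if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * the device did not answer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) */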
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) static u64 mvumi_inquiry(struct mvumi_hba *mhba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) unsigned int id, struct mvumi_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) struct mvumi_msg_frame *frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) u64 wwid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) int cmd_alloc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) int data_buf_len = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) cmd = mvumi_create_internal_cmd(mhba, data_buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) if (cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) cmd_alloc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) memset(cmd->data_buf, 0, data_buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) cmd->scmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) cmd->cmd_status = REQ_STATUS_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) atomic_set(&cmd->sync_cmd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) frame = cmd->frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) frame->device_id = (u16) id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) frame->cmd_flag = CMD_FLAG_DATA_IN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) frame->req_function = CL_FUN_SCSI_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) frame->cdb_length = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) frame->data_transfer_length = MVUMI_INQUIRY_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) memset(frame->cdb, 0, frame->cdb_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) frame->cdb[0] = INQUIRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) frame->cdb[4] = frame->data_transfer_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) mvumi_issue_blocked_cmd(mhba, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) if (cmd->cmd_status == SAM_STAT_GOOD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) wwid = id + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) memcpy((void *)&wwid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) MVUMI_INQUIRY_UUID_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) dev_dbg(&mhba->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) "inquiry device(0:%d:0) wwid(%llx)\n", id, wwid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) wwid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if (cmd_alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) mvumi_delete_internal_cmd(mhba, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) return wwid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
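^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) * mvumi_detach_devices - Remove all devices attached to the host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) * @mhba: Adapter soft state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) * Tears down every disk on the shost and driver device lists and, on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) * MV9580, also the virtual device occupying the last target id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) */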
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) static void mvumi_detach_devices(struct mvumi_hba *mhba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) struct mvumi_device *mv_dev = NULL, *dev_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) struct scsi_device *sdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) mutex_lock(&mhba->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) /* detach Hard Disk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) list_for_each_entry_safe(mv_dev, dev_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) &mhba->shost_dev_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) list_del_init(&mv_dev->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) mv_dev->id, mv_dev->wwid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) kfree(mv_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) list_del_init(&mv_dev->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) mv_dev->id, mv_dev->wwid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) kfree(mv_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) /* detach virtual device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) sdev = scsi_device_lookup(mhba->shost, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) mhba->max_target_id - 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (sdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) scsi_remove_device(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) scsi_device_put(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) mutex_unlock(&mhba->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (sdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) scsi_rescan_device(&sdev->sdev_gendev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) scsi_device_put(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
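^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * mvumi_match_devices - Check whether a probed WWID is already attached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * @mhba: Adapter soft state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * @id: Target id the WWID was found at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * @wwid: World wide id reported by the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * Returns 1 if the device is already attached at the same id (rescanning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * it on MV9143), -1 if the WWID is attached under a different id, and 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * if it is new.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) */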
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) struct mvumi_device *mv_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (mv_dev->wwid == wwid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) if (mv_dev->id != id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) dev_err(&mhba->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) "%s has same wwid[%llx], but different id[%d %d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) __func__, mv_dev->wwid, mv_dev->id, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if (mhba->pdev->device ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) PCI_DEVICE_ID_MARVELL_MV9143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) mvumi_rescan_devices(mhba, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) struct mvumi_device *mv_dev = NULL, *dev_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) list_for_each_entry_safe(mv_dev, dev_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) &mhba->shost_dev_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) if (mv_dev->id == id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) dev_dbg(&mhba->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) "detach device(0:%d:0) wwid(%llx) from HOST\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) mv_dev->id, mv_dev->wwid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) list_del_init(&mv_dev->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) kfree(mv_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
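^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) * mvumi_probe_devices - Scan all target ids and refresh the device lists
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) * @mhba: Adapter soft state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) * Sends an INQUIRY to every possible target id; devices that no longer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) * answer are removed, and newly seen WWIDs are queued on mhba_dev_list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) * for later attachment by the rescan thread. Returns 0 on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) */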
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) static int mvumi_probe_devices(struct mvumi_hba *mhba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) int id, maxid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) u64 wwid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) struct mvumi_device *mv_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) struct mvumi_cmd *cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) int found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) cmd = mvumi_create_internal_cmd(mhba, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) maxid = mhba->max_target_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) maxid = mhba->max_target_id - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) for (id = 0; id < maxid; id++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) wwid = mvumi_inquiry(mhba, id, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (!wwid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) /* device did not respond, remove it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) mvumi_remove_devices(mhba, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) /* device responded, add it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) found = mvumi_match_devices(mhba, id, wwid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) if (!found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) mvumi_remove_devices(mhba, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) mv_dev = kzalloc(sizeof(struct mvumi_device),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) if (!mv_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) dev_err(&mhba->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) "%s alloc mv_dev failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) mv_dev->id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) mv_dev->wwid = wwid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) mv_dev->sdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) INIT_LIST_HEAD(&mv_dev->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) list_add_tail(&mv_dev->list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) &mhba->mhba_dev_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) dev_dbg(&mhba->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) "probed a new device(0:%d:0) wwid(%llx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) id, mv_dev->wwid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) } else if (found == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) /* don't leak the internal cmd on the error path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) mvumi_delete_internal_cmd(mhba, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) mvumi_delete_internal_cmd(mhba, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
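/*
 * Device-manager thread: it sleeps until mvumi_launch_events() bumps
 * pnp_count on a bus-change doorbell, waits a second so a burst of
 * doorbells collapses into a single rescan, then re-probes every target
 * under device_lock and pushes the surviving devices to the SCSI layer
 * via mvumi_handle_hotplug().
 */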
static int mvumi_rescan_bus(void *data)
{
	int ret = 0;
	struct mvumi_hba *mhba = (struct mvumi_hba *) data;
	struct mvumi_device *mv_dev = NULL, *dev_next;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!atomic_read(&mhba->pnp_count))
			schedule();
		msleep(1000);
		atomic_set(&mhba->pnp_count, 0);
		__set_current_state(TASK_RUNNING);

		mutex_lock(&mhba->device_lock);
		ret = mvumi_probe_devices(mhba);
		if (!ret) {
			list_for_each_entry_safe(mv_dev, dev_next,
					&mhba->mhba_dev_list, list) {
				if (mvumi_handle_hotplug(mhba, mv_dev->id,
							 DEVICE_ONLINE)) {
					dev_err(&mhba->pdev->dev,
						"%s add device(0:%d:0) failed, wwid(%llx) already exists\n",
						__func__,
						mv_dev->id, mv_dev->wwid);
					list_del_init(&mv_dev->list);
					kfree(mv_dev);
				} else {
					list_move_tail(&mv_dev->list,
						       &mhba->shost_dev_list);
				}
			}
		}
		mutex_unlock(&mhba->device_lock);
	}
	return 0;
}

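/*
 * Handle a host-get-event reply. The payload is assumed to hold two
 * bitmaps of @size bits each: the first covers attached targets, the
 * second (starting at byte size / 8) covers removed ones. Note that the
 * loops scan for *zero* bits, so a cleared bit marks an affected target.
 */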
static void mvumi_proc_msg(struct mvumi_hba *mhba,
					struct mvumi_hotplug_event *param)
{
	u16 size = param->size;
	const unsigned long *ar_bitmap;
	const unsigned long *re_bitmap;
	int index;

	if (mhba->fw_flag & MVUMI_FW_ATTACH) {
		index = -1;
		ar_bitmap = (const unsigned long *) param->bitmap;
		re_bitmap = (const unsigned long *) &param->bitmap[size >> 3];

		mutex_lock(&mhba->sas_discovery_mutex);
		do {
			index = find_next_zero_bit(ar_bitmap, size, index + 1);
			if (index >= size)
				break;
			mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE);
		} while (1);

		index = -1;
		do {
			index = find_next_zero_bit(re_bitmap, size, index + 1);
			if (index >= size)
				break;
			mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE);
		} while (1);
		mutex_unlock(&mhba->sas_discovery_mutex);
	}
}

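/*
 * Dispatch a completed event buffer: APICDB1_EVENT_GETEVENT replies carry
 * an array of firmware events to log, APICDB1_HOST_GETEVENT replies carry
 * the hotplug bitmaps handled by mvumi_proc_msg() above.
 */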
static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
{
	if (msg == APICDB1_EVENT_GETEVENT) {
		int i, count;
		struct mvumi_driver_event *param = NULL;
		struct mvumi_event_req *er = buffer;

		count = er->count;
		if (count > MAX_EVENTS_RETURNED) {
			dev_err(&mhba->pdev->dev,
				"event count[0x%x] is bigger than max event count[0x%x].\n",
				count, MAX_EVENTS_RETURNED);
			return;
		}
		for (i = 0; i < count; i++) {
			param = &er->events[i];
			mvumi_show_event(mhba, param);
		}
	} else if (msg == APICDB1_HOST_GETEVENT) {
		mvumi_proc_msg(mhba, buffer);
	}
}

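/*
 * Synchronously pull events from the firmware: build an internal
 * APICDB0_EVENT frame with the requested sub-opcode in cdb[1], issue it
 * via mvumi_issue_blocked_cmd() and, on success, hand the returned
 * buffer to mvumi_notification().
 */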
static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg)
{
	struct mvumi_cmd *cmd;
	struct mvumi_msg_frame *frame;

	cmd = mvumi_create_internal_cmd(mhba, 512);
	if (!cmd)
		return -1;
	cmd->scmd = NULL;
	cmd->cmd_status = REQ_STATUS_PENDING;
	atomic_set(&cmd->sync_cmd, 0);
	frame = cmd->frame;
	frame->device_id = 0;
	frame->cmd_flag = CMD_FLAG_DATA_IN;
	frame->req_function = CL_FUN_SCSI_CMD;
	frame->cdb_length = MAX_COMMAND_SIZE;
	frame->data_transfer_length = sizeof(struct mvumi_event_req);
	memset(frame->cdb, 0, MAX_COMMAND_SIZE);
	frame->cdb[0] = APICDB0_EVENT;
	frame->cdb[1] = msg;
	mvumi_issue_blocked_cmd(mhba, cmd);

	if (cmd->cmd_status != SAM_STAT_GOOD)
		dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n",
			cmd->cmd_status);
	else
		mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf);

	mvumi_delete_internal_cmd(mhba, cmd);
	return 0;
}

static void mvumi_scan_events(struct work_struct *work)
{
	struct mvumi_events_wq *mu_ev =
		container_of(work, struct mvumi_events_wq, work_q);

	mvumi_get_event(mu_ev->mhba, mu_ev->event);
	kfree(mu_ev);
}

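/*
 * Called from the interrupt path to fan out doorbell events: a bus-change
 * doorbell only wakes the rescan thread (no allocation needed), while an
 * event notification schedules a work item so the event log can be
 * fetched from process context, where blocking commands are allowed.
 */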
static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status)
{
	struct mvumi_events_wq *mu_ev;

	while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) {
		if (isr_status & DRBL_BUS_CHANGE) {
			atomic_inc(&mhba->pnp_count);
			wake_up_process(mhba->dm_thread);
			isr_status &= ~(DRBL_BUS_CHANGE);
			continue;
		}

		mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
		if (mu_ev) {
			INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
			mu_ev->mhba = mhba;
			mu_ev->event = APICDB1_EVENT_GETEVENT;
			mu_ev->param = NULL;
			schedule_work(&mu_ev->work_q);
		}
		/*
		 * Clear the bit even if the allocation failed, otherwise
		 * this loop would spin forever in interrupt context.
		 */
		isr_status &= ~(DRBL_EVENT_NOTIFY);
	}
}

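/*
 * Drain the completed-outbound list: recycle each response buffer onto
 * ob_data_list, look up the owning command by tag, release the tag and
 * complete the command (SCSI or internal). Finally kick fire_cmd() to
 * push any requests that were waiting for a free tag or inbound slot.
 */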
static void mvumi_handle_clob(struct mvumi_hba *mhba)
{
	struct mvumi_rsp_frame *ob_frame;
	struct mvumi_cmd *cmd;
	struct mvumi_ob_data *pool;

	while (!list_empty(&mhba->free_ob_list)) {
		pool = list_first_entry(&mhba->free_ob_list,
						struct mvumi_ob_data, list);
		list_del_init(&pool->list);
		list_add_tail(&pool->list, &mhba->ob_data_list);

		ob_frame = (struct mvumi_rsp_frame *) &pool->data[0];
		cmd = mhba->tag_cmd[ob_frame->tag];

		atomic_dec(&mhba->fw_outstanding);
		mhba->tag_cmd[ob_frame->tag] = NULL;
		tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
		if (cmd->scmd)
			mvumi_complete_cmd(mhba, cmd, ob_frame);
		else
			mvumi_complete_internal_cmd(mhba, cmd, ob_frame);
	}
	mhba->instancet->fire_cmd(mhba, NULL);
}

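/*
 * Interrupt handler. clear_intr() latches the causes into global_isr and
 * isr_status and acknowledges the hardware; the rest runs under host_lock:
 * doorbell events are fanned out, a handshake doorbell re-enters the
 * handshake state machine, and completed outbound entries are collected
 * and then retired by mvumi_handle_clob().
 */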
static irqreturn_t mvumi_isr_handler(int irq, void *devp)
{
	struct mvumi_hba *mhba = (struct mvumi_hba *) devp;
	unsigned long flags;

	spin_lock_irqsave(mhba->shost->host_lock, flags);
	if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) {
		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
		return IRQ_NONE;
	}

	if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) {
		if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY))
			mvumi_launch_events(mhba, mhba->isr_status);
		if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
			dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
			mvumi_handshake(mhba);
		}
	}

	if (mhba->global_isr & mhba->regs->int_comaout)
		mvumi_receive_ob_list_entry(mhba);

	mhba->global_isr = 0;
	mhba->isr_status = 0;
	if (mhba->fw_state == FW_STATE_STARTED)
		mvumi_handle_clob(mhba);
	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
	return IRQ_HANDLED;
}

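/*
 * Write one command into the next inbound slot. When the firmware
 * advertises HS_CAPABILITY_SUPPORT_DYN_SRC the slot only receives a small
 * descriptor (the frame's DMA address plus its length in 32-bit words)
 * and the firmware fetches the frame itself; otherwise the whole frame,
 * scatter-gather table included, is copied into the slot.
 */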
static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	void *ib_entry;
	struct mvumi_msg_frame *ib_frame;
	unsigned int frame_len;

	ib_frame = cmd->frame;
	if (unlikely(mhba->fw_state != FW_STATE_STARTED)) {
		dev_dbg(&mhba->pdev->dev, "firmware not ready.\n");
		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
	}
	if (tag_is_empty(&mhba->tag_pool)) {
		dev_dbg(&mhba->pdev->dev, "no free tag.\n");
		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
	}
	mvumi_get_ib_list_entry(mhba, &ib_entry);

	cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
	cmd->frame->request_id = mhba->io_seq++;
	cmd->request_id = cmd->frame->request_id;
	mhba->tag_cmd[cmd->frame->tag] = cmd;
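	/*
	 * sizeof(*ib_frame) includes the 4-byte payload placeholder at the
	 * end of struct mvumi_msg_frame, hence the "- 4" before adding the
	 * size of the real scatter-gather table.
	 */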
	frame_len = sizeof(*ib_frame) - 4 +
				ib_frame->sg_counts * sizeof(struct mvumi_sgl);
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
		struct mvumi_dyn_list_entry *dle;

		dle = ib_entry;
		dle->src_low_addr =
			cpu_to_le32(lower_32_bits(cmd->frame_phys));
		dle->src_high_addr =
			cpu_to_le32(upper_32_bits(cmd->frame_phys));
		dle->if_length = (frame_len >> 2) & 0xFFF;
	} else {
		memcpy(ib_entry, ib_frame, frame_len);
	}
	return MV_QUEUE_COMMAND_RESULT_SENT;
}

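/*
 * Queue @cmd (if given) and drain waiting_req_list into the inbound queue,
 * bounded by the free slots reported by check_ib_list(). Frames are
 * batched: the doorbell in mvumi_send_ib_list_entry() is rung only once,
 * after the last frame of the batch has been written.
 */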
static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
{
	unsigned short num_of_cl_sent = 0;
	unsigned int count;
	enum mvumi_qc_result result;

	if (cmd)
		list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);

	count = mhba->instancet->check_ib_list(mhba);
	if (list_empty(&mhba->waiting_req_list) || !count)
		return;

	do {
		cmd = list_first_entry(&mhba->waiting_req_list,
				       struct mvumi_cmd, queue_pointer);
		list_del_init(&cmd->queue_pointer);
		result = mvumi_send_command(mhba, cmd);
		switch (result) {
		case MV_QUEUE_COMMAND_RESULT_SENT:
			num_of_cl_sent++;
			break;
		case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE:
			list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
			if (num_of_cl_sent > 0)
				mvumi_send_ib_list_entry(mhba);

			return;
		}
	} while (!list_empty(&mhba->waiting_req_list) && count--);

	if (num_of_cl_sent > 0)
		mvumi_send_ib_list_entry(mhba);
}

/**
 * mvumi_enable_intr - Enables interrupts
 * @mhba: Adapter soft state
 */
static void mvumi_enable_intr(struct mvumi_hba *mhba)
{
	unsigned int mask;
	struct mvumi_hw_regs *regs = mhba->regs;

	iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
	mask = ioread32(regs->enpointa_mask_reg);
	mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr;
	iowrite32(mask, regs->enpointa_mask_reg);
}

/**
 * mvumi_disable_intr - Disables interrupts
 * @mhba: Adapter soft state
 */
static void mvumi_disable_intr(struct mvumi_hba *mhba)
{
	unsigned int mask;
	struct mvumi_hw_regs *regs = mhba->regs;

	iowrite32(0, regs->arm_to_pciea_mask_reg);
	mask = ioread32(regs->enpointa_mask_reg);
	mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout |
		  regs->int_comaerr);
	iowrite32(mask, regs->enpointa_mask_reg);
}

static int mvumi_clear_intr(void *extend)
{
	struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
	unsigned int status, isr_status = 0, tmp = 0;
	struct mvumi_hw_regs *regs = mhba->regs;

	status = ioread32(regs->main_int_cause_reg);
	if (!(status & regs->int_mu) || status == 0xFFFFFFFF)
		return 1;
	if (unlikely(status & regs->int_comaerr)) {
		tmp = ioread32(regs->outb_isr_cause);
		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
			if (tmp & regs->clic_out_err) {
				iowrite32(tmp & regs->clic_out_err,
					  regs->outb_isr_cause);
			}
		} else {
			if (tmp & (regs->clic_in_err | regs->clic_out_err))
				iowrite32(tmp & (regs->clic_in_err |
						 regs->clic_out_err),
					  regs->outb_isr_cause);
		}
		status ^= mhba->regs->int_comaerr;
		/* inbound or outbound parity error, command will timeout */
	}
	if (status & regs->int_comaout) {
		tmp = ioread32(regs->outb_isr_cause);
		if (tmp & regs->clic_irq)
			iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause);
	}
	if (status & regs->int_dl_cpu2pciea) {
		isr_status = ioread32(regs->arm_to_pciea_drbl_reg);
		if (isr_status)
			iowrite32(isr_status, regs->arm_to_pciea_drbl_reg);
	}

	mhba->global_isr = status;
	mhba->isr_status = isr_status;

	return 0;
}

/**
 * mvumi_read_fw_status_reg - returns the current FW status value
 * @mhba: Adapter soft state
 */
static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)
{
	unsigned int status;

	status = ioread32(mhba->regs->arm_to_pciea_drbl_reg);
	if (status)
		iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg);
	return status;
}

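/*
 * Per-chip operation tables. The 9143 and 9580 share the generic helpers
 * above and differ only in their inbound/outbound queue accounting and
 * host reset callbacks.
 */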
static struct mvumi_instance_template mvumi_instance_9143 = {
	.fire_cmd = mvumi_fire_cmd,
	.enable_intr = mvumi_enable_intr,
	.disable_intr = mvumi_disable_intr,
	.clear_intr = mvumi_clear_intr,
	.read_fw_status_reg = mvumi_read_fw_status_reg,
	.check_ib_list = mvumi_check_ib_list_9143,
	.check_ob_list = mvumi_check_ob_list_9143,
	.reset_host = mvumi_reset_host_9143,
};

static struct mvumi_instance_template mvumi_instance_9580 = {
	.fire_cmd = mvumi_fire_cmd,
	.enable_intr = mvumi_enable_intr,
	.disable_intr = mvumi_disable_intr,
	.clear_intr = mvumi_clear_intr,
	.read_fw_status_reg = mvumi_read_fw_status_reg,
	.check_ib_list = mvumi_check_ib_list_9580,
	.check_ob_list = mvumi_check_ob_list_9580,
	.reset_host = mvumi_reset_host_9580,
};

static int mvumi_slave_configure(struct scsi_device *sdev)
{
	struct mvumi_hba *mhba;
	unsigned char bitcount = sizeof(unsigned char) * 8;

	mhba = (struct mvumi_hba *) sdev->host->hostdata;
	if (sdev->id >= mhba->max_target_id)
		return -EINVAL;

	mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount));
	return 0;
}

/**
 * mvumi_build_frame - Prepares a direct cdb (DCDB) command
 * @mhba: Adapter soft state
 * @scmd: SCSI command
 * @cmd: Command to be prepared in
 *
 * This function prepares CDB commands. These are typically pass-through
 * commands to the devices.
 */
static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
		struct scsi_cmnd *scmd, struct mvumi_cmd *cmd)
{
	struct mvumi_msg_frame *pframe;

	cmd->scmd = scmd;
	cmd->cmd_status = REQ_STATUS_PENDING;
	pframe = cmd->frame;
	pframe->device_id = ((unsigned short) scmd->device->id) |
				(((unsigned short) scmd->device->lun) << 8);
	pframe->cmd_flag = 0;

	switch (scmd->sc_data_direction) {
	case DMA_NONE:
		pframe->cmd_flag |= CMD_FLAG_NON_DATA;
		break;
	case DMA_FROM_DEVICE:
		pframe->cmd_flag |= CMD_FLAG_DATA_IN;
		break;
	case DMA_TO_DEVICE:
		pframe->cmd_flag |= CMD_FLAG_DATA_OUT;
		break;
	case DMA_BIDIRECTIONAL:
	default:
		dev_warn(&mhba->pdev->dev,
			 "unexpected data direction[%d] cmd[0x%x]\n",
			 scmd->sc_data_direction, scmd->cmnd[0]);
		goto error;
	}

	pframe->cdb_length = scmd->cmd_len;
	memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length);
	pframe->req_function = CL_FUN_SCSI_CMD;
	if (scsi_bufflen(scmd)) {
		if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
				   &pframe->sg_counts))
			goto error;

		pframe->data_transfer_length = scsi_bufflen(scmd);
	} else {
		pframe->sg_counts = 0;
		pframe->data_transfer_length = 0;
	}
	return 0;

error:
	scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) |
		SAM_STAT_CHECK_CONDITION;
	scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
				0);
	return -1;
}

/**
 * mvumi_queue_command - Queue entry point
 * @shost: Scsi_Host the command is queued to
 * @scmd: SCSI command to be queued
 */
static int mvumi_queue_command(struct Scsi_Host *shost,
				struct scsi_cmnd *scmd)
{
	struct mvumi_cmd *cmd;
	struct mvumi_hba *mhba;
	unsigned long irq_flags;

	spin_lock_irqsave(shost->host_lock, irq_flags);

	mhba = (struct mvumi_hba *) shost->hostdata;
	scmd->result = 0;
	cmd = mvumi_get_cmd(mhba);
	if (unlikely(!cmd)) {
		spin_unlock_irqrestore(shost->host_lock, irq_flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	if (unlikely(mvumi_build_frame(mhba, scmd, cmd)))
		goto out_return_cmd;

	cmd->scmd = scmd;
	scmd->SCp.ptr = (char *) cmd;
	mhba->instancet->fire_cmd(mhba, cmd);
	spin_unlock_irqrestore(shost->host_lock, irq_flags);
	return 0;

out_return_cmd:
	mvumi_return_cmd(mhba, cmd);
	scmd->scsi_done(scmd);
	spin_unlock_irqrestore(shost->host_lock, irq_flags);
	return 0;
}

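/*
 * Block-layer timeout hook: forcibly retire the command by dropping its
 * tag, unlinking it from whichever list still holds it (decrementing the
 * outstanding counter if it was already in flight), unmapping its data
 * buffer and failing it with DID_ABORT.
 */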
static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
{
	struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr;
	struct Scsi_Host *host = scmd->device->host;
	struct mvumi_hba *mhba = shost_priv(host);
	unsigned long flags;

	spin_lock_irqsave(mhba->shost->host_lock, flags);

	if (mhba->tag_cmd[cmd->frame->tag]) {
		mhba->tag_cmd[cmd->frame->tag] = NULL;
		tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
	}
	if (!list_empty(&cmd->queue_pointer))
		list_del_init(&cmd->queue_pointer);
	else
		atomic_dec(&mhba->fw_outstanding);

	scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
	scmd->SCp.ptr = NULL;
	if (scsi_bufflen(scmd)) {
		dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
			     scsi_sg_count(scmd),
			     scmd->sc_data_direction);
	}
	mvumi_return_cmd(mhba, cmd);
	spin_unlock_irqrestore(mhba->shost->host_lock, flags);

	return BLK_EH_DONE;
}

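/*
 * Fake a disk geometry for the BIOS: 64 heads x 32 sectors (1 MiB per
 * cylinder with 512-byte sectors) for small disks, switching to 255 heads
 * x 63 sectors once the capacity reaches 0x200000 sectors (1 GiB) to keep
 * the cylinder count manageable.
 */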
static int
mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
			sector_t capacity, int geom[])
{
	int heads, sectors;
	sector_t cylinders;
	unsigned long tmp;

	heads = 64;
	sectors = 32;
	tmp = heads * sectors;
	cylinders = capacity;
	sector_div(cylinders, tmp);

	if (capacity >= 0x200000) {
		heads = 255;
		sectors = 63;
		tmp = heads * sectors;
		cylinders = capacity;
		sector_div(cylinders, tmp);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	return 0;
}

static struct scsi_host_template mvumi_template = {
	.module = THIS_MODULE,
	.name = "Marvell Storage Controller",
	.slave_configure = mvumi_slave_configure,
	.queuecommand = mvumi_queue_command,
	.eh_timed_out = mvumi_timed_out,
	.eh_host_reset_handler = mvumi_host_reset,
	.bios_param = mvumi_bios_param,
	.dma_boundary = PAGE_SIZE - 1,
	.this_id = -1,
};

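/*
 * Fill in the per-chip register map. The two supported controllers map
 * their registers through different BARs (base_addr[0] on the 9143,
 * base_addr[2] on the 9580) at different offsets, and even assign
 * different bit positions to the shared interrupt-cause flags, which is
 * why every mask lives in mvumi_hw_regs instead of in #defines.
 */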
static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
{
	void *base = NULL;
	struct mvumi_hw_regs *regs;

	switch (mhba->pdev->device) {
	case PCI_DEVICE_ID_MARVELL_MV9143:
		mhba->mmio = mhba->base_addr[0];
		base = mhba->mmio;
		if (!mhba->regs) {
			mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
			if (mhba->regs == NULL)
				return -ENOMEM;
		}
		regs = mhba->regs;

		/* For Arm */
		regs->ctrl_sts_reg = base + 0x20104;
		regs->rstoutn_mask_reg = base + 0x20108;
		regs->sys_soft_rst_reg = base + 0x2010C;
		regs->main_int_cause_reg = base + 0x20200;
		regs->enpointa_mask_reg = base + 0x2020C;
		regs->rstoutn_en_reg = base + 0xF1400;

		/* For Doorbell */
		regs->pciea_to_arm_drbl_reg = base + 0x20400;
		regs->arm_to_pciea_drbl_reg = base + 0x20408;
		regs->arm_to_pciea_mask_reg = base + 0x2040C;
		regs->pciea_to_arm_msg0 = base + 0x20430;
		regs->pciea_to_arm_msg1 = base + 0x20434;
		regs->arm_to_pciea_msg0 = base + 0x20438;
		regs->arm_to_pciea_msg1 = base + 0x2043C;

		/* For Message Unit */
		regs->inb_aval_count_basel = base + 0x508;
		regs->inb_aval_count_baseh = base + 0x50C;
		regs->inb_write_pointer = base + 0x518;
		regs->inb_read_pointer = base + 0x51C;
		regs->outb_coal_cfg = base + 0x568;
		regs->outb_copy_basel = base + 0x5B0;
		regs->outb_copy_baseh = base + 0x5B4;
		regs->outb_copy_pointer = base + 0x544;
		regs->outb_read_pointer = base + 0x548;
		regs->outb_isr_cause = base + 0x560;

		/* Bit setting for HW */
		regs->int_comaout = 1 << 8;
		regs->int_comaerr = 1 << 6;
		regs->int_dl_cpu2pciea = 1 << 1;
		regs->cl_pointer_toggle = 1 << 12;
		regs->clic_irq = 1 << 1;
		regs->clic_in_err = 1 << 8;
		regs->clic_out_err = 1 << 12;
		regs->cl_slot_num_mask = 0xFFF;
		regs->int_drbl_int_mask = 0x3FFFFFFF;
		regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout |
				regs->int_comaerr;
		break;
	case PCI_DEVICE_ID_MARVELL_MV9580:
		mhba->mmio = mhba->base_addr[2];
		base = mhba->mmio;
		if (!mhba->regs) {
			mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
			if (mhba->regs == NULL)
				return -ENOMEM;
		}
		regs = mhba->regs;

		/* For Arm */
		regs->ctrl_sts_reg = base + 0x20104;
		regs->rstoutn_mask_reg = base + 0x1010C;
		regs->sys_soft_rst_reg = base + 0x10108;
		regs->main_int_cause_reg = base + 0x10200;
		regs->enpointa_mask_reg = base + 0x1020C;
		regs->rstoutn_en_reg = base + 0xF1400;

		/* For Doorbell */
		regs->pciea_to_arm_drbl_reg = base + 0x10460;
		regs->arm_to_pciea_drbl_reg = base + 0x10480;
		regs->arm_to_pciea_mask_reg = base + 0x10484;
		regs->pciea_to_arm_msg0 = base + 0x10400;
		regs->pciea_to_arm_msg1 = base + 0x10404;
		regs->arm_to_pciea_msg0 = base + 0x10420;
		regs->arm_to_pciea_msg1 = base + 0x10424;

		/* For reset */
		regs->reset_request = base + 0x10108;
		regs->reset_enable = base + 0x1010c;

		/* For Message Unit */
		regs->inb_aval_count_basel = base + 0x4008;
		regs->inb_aval_count_baseh = base + 0x400C;
		regs->inb_write_pointer = base + 0x4018;
		regs->inb_read_pointer = base + 0x401C;
		regs->outb_copy_basel = base + 0x4058;
		regs->outb_copy_baseh = base + 0x405C;
		regs->outb_copy_pointer = base + 0x406C;
		regs->outb_read_pointer = base + 0x4070;
		regs->outb_coal_cfg = base + 0x4080;
		regs->outb_isr_cause = base + 0x4088;

		/* Bit setting for HW */
		regs->int_comaout = 1 << 4;
		regs->int_dl_cpu2pciea = 1 << 12;
		regs->int_comaerr = 1 << 29;
		regs->cl_pointer_toggle = 1 << 14;
		regs->cl_slot_num_mask = 0x3FFF;
		regs->clic_irq = 1 << 0;
		regs->clic_out_err = 1 << 1;
		regs->int_drbl_int_mask = 0x3FFFFFFF;
		regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout;
		break;
	default:
		return -1;
	}

	return 0;
}

/**
 * mvumi_init_fw - Initializes the FW
 * @mhba: Adapter soft state
 *
 * This is the main function for initializing firmware.
 */
static int mvumi_init_fw(struct mvumi_hba *mhba)
{
	int ret = 0;

	if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
		dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
		return -EBUSY;
	}
	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
	if (ret)
		goto fail_ioremap;

	switch (mhba->pdev->device) {
	case PCI_DEVICE_ID_MARVELL_MV9143:
		mhba->instancet = &mvumi_instance_9143;
		mhba->io_seq = 0;
		mhba->max_sge = MVUMI_MAX_SG_ENTRY;
		mhba->request_id_enabled = 1;
		break;
	case PCI_DEVICE_ID_MARVELL_MV9580:
		mhba->instancet = &mvumi_instance_9580;
		mhba->io_seq = 0;
		mhba->max_sge = MVUMI_MAX_SG_ENTRY;
		break;
	default:
		dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
			mhba->pdev->device);
		mhba->instancet = NULL;
		ret = -EINVAL;
		goto fail_alloc_mem;
	}
	dev_dbg(&mhba->pdev->dev, "found device id: %04X\n",
		mhba->pdev->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) ret = mvumi_cfg_hw_reg(mhba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) dev_err(&mhba->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) "failed to allocate memory for reg\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) goto fail_alloc_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) if (!mhba->handshake_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) dev_err(&mhba->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) "failed to allocate memory for handshake\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) goto fail_alloc_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359)
	if (mvumi_start(mhba)) {
		ret = -EINVAL;
		goto fail_ready_state;
	}
	ret = mvumi_alloc_cmds(mhba);
	if (ret)
		goto fail_ready_state;

	return 0;

fail_ready_state:
	mvumi_release_mem_resource(mhba);
	dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
		mhba->handshake_page, mhba->handshake_page_phys);
fail_alloc_page:
	kfree(mhba->regs);
fail_alloc_mem:
	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
fail_ioremap:
	pci_release_regions(mhba->pdev);

	return ret;
}

/**
 * mvumi_io_attach - Attaches this driver to SCSI mid-layer
 * @mhba: Adapter soft state
 */
static int mvumi_io_attach(struct mvumi_hba *mhba)
{
	struct Scsi_Host *host = mhba->shost;
	struct scsi_device *sdev = NULL;
	int ret;
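	/*
	 * An inbound frame carries the message header followed by the
	 * scatter-gather list, so the per-command SG limit is however many
	 * SG entries still fit in one frame.
	 */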
	unsigned int max_sg = (mhba->ib_max_size + 4 -
			sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);

	host->irq = mhba->pdev->irq;
	host->unique_id = mhba->unique_id;
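	/*
	 * Hold one I/O frame back (likely for the driver's internal
	 * commands) and never advertise fewer than one queue slot.
	 */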
	host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
	host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
	host->max_sectors = mhba->max_transfer_size / 512;
	host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
	host->max_id = mhba->max_target_id;
	host->max_cmd_len = MAX_COMMAND_SIZE;

	ret = scsi_add_host(host, &mhba->pdev->dev);
	if (ret) {
		dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
		return ret;
	}
	mhba->fw_flag |= MVUMI_FW_ATTACH;

	mutex_lock(&mhba->sas_discovery_mutex);
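	/*
	 * The 9580 exposes an extra virtual device on the highest target
	 * ID; register it with the midlayer explicitly.
	 */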
	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
		ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
	else
		ret = 0;
	if (ret) {
		dev_err(&mhba->pdev->dev, "add virtual device failed\n");
		mutex_unlock(&mhba->sas_discovery_mutex);
		goto fail_add_device;
	}

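	/*
	 * Spawn the kthread that rescans the bus for device changes;
	 * pnp_count is primed so the thread performs an initial scan
	 * once woken.
	 */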
	mhba->dm_thread = kthread_create(mvumi_rescan_bus,
		mhba, "mvumi_scanthread");
	if (IS_ERR(mhba->dm_thread)) {
		dev_err(&mhba->pdev->dev,
			"failed to create device scan thread\n");
		ret = PTR_ERR(mhba->dm_thread);
		mutex_unlock(&mhba->sas_discovery_mutex);
		goto fail_create_thread;
	}
	atomic_set(&mhba->pnp_count, 1);
	wake_up_process(mhba->dm_thread);

	mutex_unlock(&mhba->sas_discovery_mutex);
	return 0;

fail_create_thread:
	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
		sdev = scsi_device_lookup(mhba->shost, 0,
						mhba->max_target_id - 1, 0);
	if (sdev) {
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
	}
fail_add_device:
	scsi_remove_host(mhba->shost);
	return ret;
}

/**
 * mvumi_probe_one - PCI hotplug entry point
 * @pdev: PCI device structure
 * @id: matching entry in mvumi_pci_table
 */
static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *host;
	struct mvumi_hba *mhba;
	int ret;

	dev_dbg(&pdev->dev, "%04x:%04x:%04x:%04x\n",
		pdev->vendor, pdev->device, pdev->subsystem_vendor,
		pdev->subsystem_device);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	ret = mvumi_pci_set_master(pdev);
	if (ret)
		goto fail_set_dma_mask;

	host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
	if (!host) {
		dev_err(&pdev->dev, "scsi_host_alloc failed\n");
		ret = -ENOMEM;
		goto fail_alloc_instance;
	}
	mhba = shost_priv(host);

	INIT_LIST_HEAD(&mhba->cmd_pool);
	INIT_LIST_HEAD(&mhba->ob_data_list);
	INIT_LIST_HEAD(&mhba->free_ob_list);
	INIT_LIST_HEAD(&mhba->res_list);
	INIT_LIST_HEAD(&mhba->waiting_req_list);
	mutex_init(&mhba->device_lock);
	INIT_LIST_HEAD(&mhba->mhba_dev_list);
	INIT_LIST_HEAD(&mhba->shost_dev_list);
	atomic_set(&mhba->fw_outstanding, 0);
	init_waitqueue_head(&mhba->int_cmd_wait_q);
	mutex_init(&mhba->sas_discovery_mutex);

	mhba->pdev = pdev;
	mhba->shost = host;
	mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;

	ret = mvumi_init_fw(mhba);
	if (ret)
		goto fail_init_fw;

	ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
			  "mvumi", mhba);
	if (ret) {
		dev_err(&pdev->dev, "failed to register IRQ\n");
		goto fail_init_irq;
	}

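	/*
	 * The ISR is in place, so unmask controller interrupts and publish
	 * the driver data before attaching to the SCSI midlayer.
	 */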
	mhba->instancet->enable_intr(mhba);
	pci_set_drvdata(pdev, mhba);

	ret = mvumi_io_attach(mhba);
	if (ret)
		goto fail_io_attach;

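	/* Snapshot the BAR addresses now that the adapter is fully up. */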
	mvumi_backup_bar_addr(mhba);
	dev_dbg(&pdev->dev, "mvumi driver probed successfully\n");

	return 0;

fail_io_attach:
	mhba->instancet->disable_intr(mhba);
	free_irq(mhba->pdev->irq, mhba);
fail_init_irq:
	mvumi_release_fw(mhba);
fail_init_fw:
	scsi_host_put(host);
fail_alloc_instance:
fail_set_dma_mask:
	pci_disable_device(pdev);

	return ret;
}

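/**
 * mvumi_detach_one - PCI hot removal entry point
 * @pdev: PCI device structure
 */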
static void mvumi_detach_one(struct pci_dev *pdev)
{
	struct Scsi_Host *host;
	struct mvumi_hba *mhba;

	mhba = pci_get_drvdata(pdev);
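	/*
	 * Stop the background rescan thread first so it cannot issue new
	 * management commands while the host is being torn down.
	 */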
	if (mhba->dm_thread) {
		kthread_stop(mhba->dm_thread);
		mhba->dm_thread = NULL;
	}

	mvumi_detach_devices(mhba);
	host = mhba->shost;
	scsi_remove_host(mhba->shost);
	mvumi_flush_cache(mhba);

	mhba->instancet->disable_intr(mhba);
	free_irq(mhba->pdev->irq, mhba);
	mvumi_release_fw(mhba);
	scsi_host_put(host);
	pci_disable_device(pdev);
	dev_dbg(&pdev->dev, "driver removed\n");
}

/**
 * mvumi_shutdown - Shutdown entry point
 * @pdev: PCI device structure
 */
static void mvumi_shutdown(struct pci_dev *pdev)
{
	struct mvumi_hba *mhba = pci_get_drvdata(pdev);

	mvumi_flush_cache(mhba);
}

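/**
 * mvumi_suspend - Suspend entry point (legacy PCI PM)
 * @pdev: PCI device structure
 * @state: power state to transition to
 */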
static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct mvumi_hba *mhba = pci_get_drvdata(pdev);

	mvumi_flush_cache(mhba);

	mhba->instancet->disable_intr(mhba);
	free_irq(mhba->pdev->irq, mhba);
	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
	pci_release_regions(pdev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

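/**
 * mvumi_resume - Resume entry point (legacy PCI PM)
 * @pdev: PCI device structure
 */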
static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
{
	int ret;
	struct mvumi_hba *mhba = pci_get_drvdata(pdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

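	/*
	 * The controller loses state across suspend, so redo the probe-time
	 * setup: re-enable the device, remap the BARs, rebuild the register
	 * map, then reset and restart the firmware.
	 */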
	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "enable device failed\n");
		return ret;
	}

	ret = mvumi_pci_set_master(pdev);
	if (ret)
		goto fail;
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto fail;
	ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
	if (ret)
		goto fail;
	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
	if (ret)
		goto release_regions;

	if (mvumi_cfg_hw_reg(mhba)) {
		ret = -EINVAL;
		goto unmap_pci_addr;
	}

	mhba->mmio = mhba->base_addr[0];
	mvumi_reset(mhba);

	if (mvumi_start(mhba)) {
		ret = -EINVAL;
		goto unmap_pci_addr;
	}

	ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
			  "mvumi", mhba);
	if (ret) {
		dev_err(&pdev->dev, "failed to register IRQ\n");
		goto unmap_pci_addr;
	}
	mhba->instancet->enable_intr(mhba);

	return 0;

unmap_pci_addr:
	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
release_regions:
	pci_release_regions(pdev);
fail:
	pci_disable_device(pdev);

	return ret;
}

static struct pci_driver mvumi_pci_driver = {
	.name = MV_DRIVER_NAME,
	.id_table = mvumi_pci_table,
	.probe = mvumi_probe_one,
	.remove = mvumi_detach_one,
	.shutdown = mvumi_shutdown,
#ifdef CONFIG_PM
	.suspend = mvumi_suspend,
	.resume = mvumi_resume,
#endif
};

module_pci_driver(mvumi_pci_driver);