^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * rrunner.c: Linux driver for the Essential RoadRunner HIPPI board.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 1998-2002 by Jes Sorensen, <jes@wildopensource.com>.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Thanks to Essential Communication for providing us with hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * and very comprehensive documentation without which I would not have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * been able to write this driver. A special thank you to John Gibbon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * for sorting out the legal issues, with the NDA, allowing the code to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * be released under the GPL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * Thanks to Jayaram Bhat from ODS/Essential for fixing some of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * stupid bugs in my code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * Softnet support and various other patches from Val Henson of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * ODS/Essential.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * PCI DMA mapping code partly based on work by Francois Romieu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #define DEBUG 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #define RX_DMA_SKBUFF 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #define PKT_COPY_THRESHOLD 512
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/ioport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/hippidevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include <net/sock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include <asm/cache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #include <asm/byteorder.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #include <asm/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define rr_if_busy(dev) netif_queue_stopped(dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define rr_if_running(dev) netif_running(dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #include "rrunner.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define RUN_AT(x) (jiffies + (x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) MODULE_AUTHOR("Jes Sorensen <jes@wildopensource.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) MODULE_DESCRIPTION("Essential RoadRunner HIPPI driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) static const char version[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) "rrunner.c: v0.50 11/11/2002 Jes Sorensen (jes@wildopensource.com)\n";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) static const struct net_device_ops rr_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) .ndo_open = rr_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) .ndo_stop = rr_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) .ndo_do_ioctl = rr_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) .ndo_start_xmit = rr_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) .ndo_set_mac_address = hippi_mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) * Implementation notes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) * The DMA engine only allows for DMA within physical 64KB chunks of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) * memory. The current approach of the driver (and stack) is to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * linear blocks of memory for the skbuffs. However, as the data block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) * is always the first part of the skb and skbs are 2^n aligned so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) * are guarantted to get the whole block within one 64KB align 64KB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) * chunk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) * On the long term, relying on being able to allocate 64KB linear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) * chunks of memory is not feasible and the skb handling code and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) * stack will need to know about I/O vectors or something similar.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) static int version_disp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) u8 pci_latency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) struct rr_private *rrpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) void *tmpptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) dma_addr_t ring_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) dev = alloc_hippi_dev(sizeof(struct rr_private));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) goto out3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) ret = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) rrpriv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) SET_NETDEV_DEV(dev, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) ret = pci_request_regions(pdev, "rrunner");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) pci_set_drvdata(pdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) rrpriv->pci_dev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) spin_lock_init(&rrpriv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) dev->netdev_ops = &rr_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) /* display version info if adapter is found */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) if (!version_disp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) /* set display flag to TRUE so that */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) /* we only display this string ONCE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) version_disp = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) printk(version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) if (pci_latency <= 0x58){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) pci_latency = 0x58;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) pci_write_config_byte(pdev, PCI_LATENCY_TIMER, pci_latency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) "at 0x%llx, irq %i, PCI latency %i\n", dev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) (unsigned long long)pci_resource_start(pdev, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) pdev->irq, pci_latency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) * Remap the MMIO regs into kernel space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) rrpriv->regs = pci_iomap(pdev, 0, 0x1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) if (!rrpriv->regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) printk(KERN_ERR "%s: Unable to map I/O register, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) "RoadRunner will be disabled.\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) tmpptr = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) rrpriv->tx_ring = tmpptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) rrpriv->tx_ring_dma = ring_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) if (!tmpptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) tmpptr = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) rrpriv->rx_ring = tmpptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) rrpriv->rx_ring_dma = ring_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) if (!tmpptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) tmpptr = dma_alloc_coherent(&pdev->dev, EVT_RING_SIZE, &ring_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) rrpriv->evt_ring = tmpptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) rrpriv->evt_ring_dma = ring_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) if (!tmpptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) * Don't access any register before this point!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) #ifdef __BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) writel(readl(&rrpriv->regs->HostCtrl) | NO_SWAP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) &rrpriv->regs->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) * Need to add a case for little-endian 64-bit hosts here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) rr_init(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) ret = register_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) if (rrpriv->evt_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) dma_free_coherent(&pdev->dev, EVT_RING_SIZE, rrpriv->evt_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) rrpriv->evt_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) if (rrpriv->rx_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, rrpriv->rx_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) rrpriv->rx_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) if (rrpriv->tx_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, rrpriv->tx_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) rrpriv->tx_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) if (rrpriv->regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) pci_iounmap(pdev, rrpriv->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) if (pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) out2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) out3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) static void rr_remove_one(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) struct net_device *dev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) struct rr_private *rr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) printk(KERN_ERR "%s: trying to unload running NIC\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) writel(HALT_NIC, &rr->regs->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) unregister_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) dma_free_coherent(&pdev->dev, EVT_RING_SIZE, rr->evt_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) rr->evt_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, rr->rx_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) rr->rx_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, rr->tx_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) rr->tx_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) pci_iounmap(pdev, rr->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) * Commands are considered to be slow, thus there is no reason to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) * inline this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) static void rr_issue_cmd(struct rr_private *rrpriv, struct cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) struct rr_regs __iomem *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) u32 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) regs = rrpriv->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) * This is temporary - it will go away in the final version.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) * We probably also want to make this function inline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) if (readl(®s->HostCtrl) & NIC_HALTED){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) printk("issuing command for halted NIC, code 0x%x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) "HostCtrl %08x\n", cmd->code, readl(®s->HostCtrl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) if (readl(®s->Mode) & FATAL_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) printk("error codes Fail1 %02x, Fail2 %02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) readl(®s->Fail1), readl(®s->Fail2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) idx = rrpriv->info->cmd_ctrl.pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) writel(*(u32*)(cmd), ®s->CmdRing[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) idx = (idx - 1) % CMD_RING_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) rrpriv->info->cmd_ctrl.pi = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) if (readl(®s->Mode) & FATAL_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) printk("error code %02x\n", readl(®s->Fail1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) * Reset the board in a sensible manner. The NIC is already halted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) * when we get here and a spin-lock is held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) static int rr_reset(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) struct rr_private *rrpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) struct rr_regs __iomem *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) u32 start_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) rrpriv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) regs = rrpriv->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) rr_load_firmware(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) writel(0x01000000, ®s->TX_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) writel(0xff800000, ®s->RX_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) writel(0, ®s->AssistState);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) writel(CLEAR_INTA, ®s->LocalCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) writel(0x01, ®s->BrkPt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) writel(0, ®s->Timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) writel(0, ®s->TimerRef);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) writel(RESET_DMA, ®s->DmaReadState);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) writel(RESET_DMA, ®s->DmaWriteState);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) writel(0, ®s->DmaWriteHostHi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) writel(0, ®s->DmaWriteHostLo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) writel(0, ®s->DmaReadHostHi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) writel(0, ®s->DmaReadHostLo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) writel(0, ®s->DmaReadLen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) writel(0, ®s->DmaWriteLen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) writel(0, ®s->DmaWriteLcl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) writel(0, ®s->DmaWriteIPchecksum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) writel(0, ®s->DmaReadLcl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) writel(0, ®s->DmaReadIPchecksum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) writel(0, ®s->PciState);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) #if (BITS_PER_LONG == 64) && defined __LITTLE_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) writel(SWAP_DATA | PTR64BIT | PTR_WD_SWAP, ®s->Mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) #elif (BITS_PER_LONG == 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) writel(SWAP_DATA | PTR64BIT | PTR_WD_NOSWAP, ®s->Mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) writel(SWAP_DATA | PTR32BIT | PTR_WD_NOSWAP, ®s->Mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) * Don't worry, this is just black magic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) writel(0xdf000, ®s->RxBase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) writel(0xdf000, ®s->RxPrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) writel(0xdf000, ®s->RxCon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) writel(0xce000, ®s->TxBase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) writel(0xce000, ®s->TxPrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) writel(0xce000, ®s->TxCon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) writel(0, ®s->RxIndPro);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) writel(0, ®s->RxIndCon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) writel(0, ®s->RxIndRef);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) writel(0, ®s->TxIndPro);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) writel(0, ®s->TxIndCon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) writel(0, ®s->TxIndRef);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) writel(0xcc000, ®s->pad10[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) writel(0, ®s->DrCmndPro);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) writel(0, ®s->DrCmndCon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) writel(0, ®s->DwCmndPro);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) writel(0, ®s->DwCmndCon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) writel(0, ®s->DwCmndRef);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) writel(0, ®s->DrDataPro);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) writel(0, ®s->DrDataCon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) writel(0, ®s->DrDataRef);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) writel(0, ®s->DwDataPro);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) writel(0, ®s->DwDataCon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) writel(0, ®s->DwDataRef);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) writel(0xffffffff, ®s->MbEvent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) writel(0, ®s->Event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) writel(0, ®s->TxPi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) writel(0, ®s->IpRxPi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) writel(0, ®s->EvtCon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) writel(0, ®s->EvtPrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) rrpriv->info->evt_ctrl.pi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) for (i = 0; i < CMD_RING_ENTRIES; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) writel(0, ®s->CmdRing[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) * Why 32 ? is this not cache line size dependent?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) writel(RBURST_64|WBURST_64, ®s->PciState);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) start_pc = rr_read_eeprom_word(rrpriv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) offsetof(struct eeprom, rncd_info.FwStart));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) #if (DEBUG > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) printk("%s: Executing firmware at address 0x%06x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) dev->name, start_pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) writel(start_pc + 0x800, ®s->Pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) writel(start_pc, ®s->Pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) * Read a string from the EEPROM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) static unsigned int rr_read_eeprom(struct rr_private *rrpriv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) unsigned long offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) unsigned char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) unsigned long length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) struct rr_regs __iomem *regs = rrpriv->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) u32 misc, io, host, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) io = readl(®s->ExtIo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) writel(0, ®s->ExtIo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) misc = readl(®s->LocalCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) writel(0, ®s->LocalCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) host = readl(®s->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) writel(host | HALT_NIC, ®s->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) for (i = 0; i < length; i++){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) writel((EEPROM_BASE + ((offset+i) << 3)), ®s->WinBase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) buf[i] = (readl(®s->WinData) >> 24) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) writel(host, ®s->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) writel(misc, ®s->LocalCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) writel(io, ®s->ExtIo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) * Shortcut to read one word (4 bytes) out of the EEPROM and convert
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) * it to our CPU byte-order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) static u32 rr_read_eeprom_word(struct rr_private *rrpriv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) size_t offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) __be32 word;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) if ((rr_read_eeprom(rrpriv, offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) (unsigned char *)&word, 4) == 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) return be32_to_cpu(word);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) * Write a string to the EEPROM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) * This is only called when the firmware is not running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) static unsigned int write_eeprom(struct rr_private *rrpriv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) unsigned long offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) unsigned char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) unsigned long length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) struct rr_regs __iomem *regs = rrpriv->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) u32 misc, io, data, i, j, ready, error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) io = readl(®s->ExtIo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) writel(0, ®s->ExtIo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) misc = readl(®s->LocalCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) writel(ENABLE_EEPROM_WRITE, ®s->LocalCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) for (i = 0; i < length; i++){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) writel((EEPROM_BASE + ((offset+i) << 3)), ®s->WinBase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) data = buf[i] << 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) * Only try to write the data if it is not the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) * value already.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) if ((readl(®s->WinData) & 0xff000000) != data){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) writel(data, ®s->WinData);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) ready = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) while(!ready){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) udelay(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) if ((readl(®s->WinData) & 0xff000000) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) ready = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) if (j++ > 5000){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) printk("data mismatch: %08x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) "WinData %08x\n", data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) readl(®s->WinData));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) ready = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) error = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) writel(misc, ®s->LocalCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) writel(io, ®s->ExtIo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) static int rr_init(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) struct rr_private *rrpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) struct rr_regs __iomem *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) u32 sram_size, rev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) rrpriv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) regs = rrpriv->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) rev = readl(®s->FwRev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) rrpriv->fw_rev = rev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) if (rev > 0x00020024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) printk(" Firmware revision: %i.%i.%i\n", (rev >> 16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) ((rev >> 8) & 0xff), (rev & 0xff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) else if (rev >= 0x00020000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) printk(" Firmware revision: %i.%i.%i (2.0.37 or "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) "later is recommended)\n", (rev >> 16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) ((rev >> 8) & 0xff), (rev & 0xff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) }else{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) printk(" Firmware revision too old: %i.%i.%i, please "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) "upgrade to 2.0.37 or later.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) (rev >> 16), ((rev >> 8) & 0xff), (rev & 0xff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) #if (DEBUG > 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) printk(" Maximum receive rings %i\n", readl(®s->MaxRxRng));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) * Read the hardware address from the eeprom. The HW address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) * is not really necessary for HIPPI but awfully convenient.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) * The pointer arithmetic to put it in dev_addr is ugly, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) * Donald Becker does it this way for the GigE version of this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) * card and it's shorter and more portable than any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) * other method I've seen. -VAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) *(__be16 *)(dev->dev_addr) =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) htons(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) *(__be32 *)(dev->dev_addr+2) =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) htonl(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA[4])));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) printk(" MAC: %pM\n", dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) sram_size = rr_read_eeprom_word(rrpriv, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) printk(" SRAM size 0x%06x\n", sram_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) static int rr_init1(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) struct rr_private *rrpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) struct rr_regs __iomem *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) unsigned long myjif, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) struct cmd cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) u32 hostctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) int ecode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) short i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) rrpriv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) regs = rrpriv->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) spin_lock_irqsave(&rrpriv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) hostctrl = readl(®s->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) writel(hostctrl | HALT_NIC | RR_CLEAR_INT, ®s->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) if (hostctrl & PARITY_ERR){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) printk("%s: Parity error halting NIC - this is serious!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) spin_unlock_irqrestore(&rrpriv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) ecode = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) set_rxaddr(regs, rrpriv->rx_ctrl_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) set_infoaddr(regs, rrpriv->info_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) rrpriv->info->evt_ctrl.entry_size = sizeof(struct event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) rrpriv->info->evt_ctrl.entries = EVT_RING_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) rrpriv->info->evt_ctrl.mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) rrpriv->info->evt_ctrl.pi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) set_rraddr(&rrpriv->info->evt_ctrl.rngptr, rrpriv->evt_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) rrpriv->info->cmd_ctrl.entry_size = sizeof(struct cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) rrpriv->info->cmd_ctrl.entries = CMD_RING_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) rrpriv->info->cmd_ctrl.mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) rrpriv->info->cmd_ctrl.pi = 15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) for (i = 0; i < CMD_RING_ENTRIES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) writel(0, ®s->CmdRing[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) for (i = 0; i < TX_RING_ENTRIES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) rrpriv->tx_ring[i].size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) set_rraddr(&rrpriv->tx_ring[i].addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) rrpriv->tx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) rrpriv->info->tx_ctrl.entry_size = sizeof(struct tx_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) rrpriv->info->tx_ctrl.entries = TX_RING_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) rrpriv->info->tx_ctrl.mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) rrpriv->info->tx_ctrl.pi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) set_rraddr(&rrpriv->info->tx_ctrl.rngptr, rrpriv->tx_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) * Set dirty_tx before we start receiving interrupts, otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) * the interrupt handler might think it is supposed to process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) * tx ints before we are up and running, which may cause a null
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) * pointer access in the int handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) rrpriv->tx_full = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) rrpriv->cur_rx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) rrpriv->dirty_rx = rrpriv->dirty_tx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) rr_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) /* Tuning values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) writel(0x5000, ®s->ConRetry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) writel(0x100, ®s->ConRetryTmr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) writel(0x500000, ®s->ConTmout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) writel(0x60, ®s->IntrTmr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) writel(0x500000, ®s->TxDataMvTimeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) writel(0x200000, ®s->RxDataMvTimeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) writel(0x80, ®s->WriteDmaThresh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) writel(0x80, ®s->ReadDmaThresh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) rrpriv->fw_running = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) hostctrl &= ~(HALT_NIC | INVALID_INST_B | PARITY_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) writel(hostctrl, ®s->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) spin_unlock_irqrestore(&rrpriv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) for (i = 0; i < RX_RING_ENTRIES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) dma_addr_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) rrpriv->rx_ring[i].mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) skb = alloc_skb(dev->mtu + HIPPI_HLEN, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) printk(KERN_WARNING "%s: Unable to allocate memory "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) "for receive ring - halting NIC\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) ecode = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) rrpriv->rx_skbuff[i] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) addr = dma_map_single(&rrpriv->pci_dev->dev, skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) dev->mtu + HIPPI_HLEN, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) * Sanity test to see if we conflict with the DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) * limitations of the Roadrunner.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) if ((((unsigned long)skb->data) & 0xfff) > ~65320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) printk("skb alloc error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) set_rraddr(&rrpriv->rx_ring[i].addr, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) rrpriv->rx_ring[i].size = dev->mtu + HIPPI_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) rrpriv->rx_ctrl[4].entry_size = sizeof(struct rx_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) rrpriv->rx_ctrl[4].entries = RX_RING_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) rrpriv->rx_ctrl[4].mode = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) rrpriv->rx_ctrl[4].pi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) set_rraddr(&rrpriv->rx_ctrl[4].rngptr, rrpriv->rx_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) udelay(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) * Now start the FirmWare.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) cmd.code = C_START_FW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) cmd.ring = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) cmd.index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) rr_issue_cmd(rrpriv, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) * Give the FirmWare time to chew on the `get running' command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) myjif = jiffies + 5 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) while (time_before(jiffies, myjif) && !rrpriv->fw_running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) return ecode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) * We might have gotten here because we are out of memory,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) * make sure we release everything we allocated before failing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) for (i = 0; i < RX_RING_ENTRIES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) struct sk_buff *skb = rrpriv->rx_skbuff[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) dma_unmap_single(&rrpriv->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) rrpriv->rx_ring[i].addr.addrlo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) dev->mtu + HIPPI_HLEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) rrpriv->rx_ring[i].size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) set_rraddr(&rrpriv->rx_ring[i].addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) rrpriv->rx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) return ecode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) * All events are considered to be slow (RX/TX ints do not generate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) * events) and are handled here, outside the main interrupt handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) * to reduce the size of the handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) struct rr_private *rrpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) struct rr_regs __iomem *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) rrpriv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) regs = rrpriv->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) while (prodidx != eidx){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) switch (rrpriv->evt_ring[eidx].code){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) case E_NIC_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) tmp = readl(®s->FwRev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) printk(KERN_INFO "%s: Firmware revision %i.%i.%i "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) "up and running\n", dev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) (tmp >> 16), ((tmp >> 8) & 0xff), (tmp & 0xff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) rrpriv->fw_running = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) writel(RX_RING_ENTRIES - 1, ®s->IpRxPi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) case E_LINK_ON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) printk(KERN_INFO "%s: Optical link ON\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) case E_LINK_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) printk(KERN_INFO "%s: Optical link OFF\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) case E_RX_IDLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) printk(KERN_WARNING "%s: RX data not moving\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) case E_WATCHDOG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) printk(KERN_INFO "%s: The watchdog is here to see "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) "us\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) case E_INTERN_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) printk(KERN_ERR "%s: HIPPI Internal NIC error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) writel(readl(®s->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) ®s->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) case E_HOST_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) printk(KERN_ERR "%s: Host software error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) writel(readl(®s->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) ®s->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) * TX events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) case E_CON_REJ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) printk(KERN_WARNING "%s: Connection rejected\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) dev->stats.tx_aborted_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) case E_CON_TMOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) printk(KERN_WARNING "%s: Connection timeout\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) case E_DISC_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) printk(KERN_WARNING "%s: HIPPI disconnect error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) dev->stats.tx_aborted_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) case E_INT_PRTY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) printk(KERN_ERR "%s: HIPPI Internal Parity error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) writel(readl(®s->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) ®s->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) case E_TX_IDLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) printk(KERN_WARNING "%s: Transmitter idle\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) case E_TX_LINK_DROP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) printk(KERN_WARNING "%s: Link lost during transmit\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) dev->stats.tx_aborted_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) writel(readl(®s->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) ®s->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) case E_TX_INV_RNG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) printk(KERN_ERR "%s: Invalid send ring block\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) writel(readl(®s->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) ®s->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) case E_TX_INV_BUF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) printk(KERN_ERR "%s: Invalid send buffer address\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) writel(readl(®s->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) ®s->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) case E_TX_INV_DSC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) printk(KERN_ERR "%s: Invalid descriptor address\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) writel(readl(®s->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) ®s->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * RX events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) case E_RX_RNG_OUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) printk(KERN_INFO "%s: Receive ring full\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) case E_RX_PAR_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) printk(KERN_WARNING "%s: Receive parity error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) case E_RX_LLRC_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) printk(KERN_WARNING "%s: Receive LLRC error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) case E_PKT_LN_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) printk(KERN_WARNING "%s: Receive packet length "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) "error\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) case E_DTA_CKSM_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) printk(KERN_WARNING "%s: Data checksum error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) case E_SHT_BST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) printk(KERN_WARNING "%s: Unexpected short burst "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) "error\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) case E_STATE_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) printk(KERN_WARNING "%s: Recv. state transition"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) " error\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) case E_UNEXP_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) printk(KERN_WARNING "%s: Unexpected data error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) case E_LST_LNK_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) printk(KERN_WARNING "%s: Link lost error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) case E_FRM_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) printk(KERN_WARNING "%s: Framing Error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) case E_FLG_SYN_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) printk(KERN_WARNING "%s: Flag sync. lost during "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) "packet\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) case E_RX_INV_BUF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) printk(KERN_ERR "%s: Invalid receive buffer "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) "address\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) writel(readl(®s->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) ®s->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) case E_RX_INV_DSC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) printk(KERN_ERR "%s: Invalid receive descriptor "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) "address\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) writel(readl(®s->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) ®s->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) case E_RNG_BLK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) printk(KERN_ERR "%s: Invalid ring block\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) writel(readl(®s->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) ®s->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) /* Label packet to be dropped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * Actual dropping occurs in rx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * handling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * The index of packet we get to drop is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) * the index of the packet following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * the bad packet. -kbf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) u16 index = rrpriv->evt_ring[eidx].index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) index = (index + (RX_RING_ENTRIES - 1)) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) RX_RING_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) rrpriv->rx_ring[index].mode |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) (PACKET_BAD | PACKET_END);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) printk(KERN_WARNING "%s: Unhandled event 0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) dev->name, rrpriv->evt_ring[eidx].code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) eidx = (eidx + 1) % EVT_RING_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) rrpriv->info->evt_ctrl.pi = eidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) return eidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) static void rx_int(struct net_device *dev, u32 rxlimit, u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) struct rr_private *rrpriv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) struct rr_regs __iomem *regs = rrpriv->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) struct rx_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) u32 pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) desc = &(rrpriv->rx_ring[index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) pkt_len = desc->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) #if (DEBUG > 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) printk("index %i, rxlimit %i\n", index, rxlimit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) printk("len %x, mode %x\n", pkt_len, desc->mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if ( (rrpriv->rx_ring[index].mode & PACKET_BAD) == PACKET_BAD){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) dev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) goto defer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (pkt_len > 0){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) struct sk_buff *skb, *rx_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) rx_skb = rrpriv->rx_skbuff[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (pkt_len < PKT_COPY_THRESHOLD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) skb = alloc_skb(pkt_len, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (skb == NULL){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) printk(KERN_WARNING "%s: Unable to allocate skb (%i bytes), deferring packet\n", dev->name, pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) dev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) goto defer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) dma_sync_single_for_cpu(&rrpriv->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) desc->addr.addrlo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) pkt_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) skb_put_data(skb, rx_skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) dma_sync_single_for_device(&rrpriv->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) desc->addr.addrlo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) pkt_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }else{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct sk_buff *newskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) newskb = alloc_skb(dev->mtu + HIPPI_HLEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (newskb){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) dma_addr_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) dma_unmap_single(&rrpriv->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) desc->addr.addrlo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) dev->mtu + HIPPI_HLEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) skb = rx_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) skb_put(skb, pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) rrpriv->rx_skbuff[index] = newskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) addr = dma_map_single(&rrpriv->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) newskb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) dev->mtu + HIPPI_HLEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) set_rraddr(&desc->addr, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) printk("%s: Out of memory, deferring "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) "packet\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) dev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) goto defer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) skb->protocol = hippi_type_trans(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) netif_rx(skb); /* send it up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) dev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) dev->stats.rx_bytes += pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) defer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) desc->mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) desc->size = dev->mtu + HIPPI_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if ((index & 7) == 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) writel(index, ®s->IpRxPi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) index = (index + 1) % RX_RING_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) } while(index != rxlimit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) rrpriv->cur_rx = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) static irqreturn_t rr_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) struct rr_private *rrpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) struct rr_regs __iomem *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) struct net_device *dev = (struct net_device *)dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) u32 prodidx, rxindex, eidx, txcsmr, rxlimit, txcon;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) rrpriv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) regs = rrpriv->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (!(readl(®s->HostCtrl) & RR_INT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) spin_lock(&rrpriv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) prodidx = readl(®s->EvtPrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) txcsmr = (prodidx >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) rxlimit = (prodidx >> 16) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) prodidx &= 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) #if (DEBUG > 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) printk("%s: interrupt, prodidx = %i, eidx = %i\n", dev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) prodidx, rrpriv->info->evt_ctrl.pi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * Order here is important. We must handle events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * before doing anything else in order to catch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) * such things as LLRC errors, etc -kbf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) eidx = rrpriv->info->evt_ctrl.pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (prodidx != eidx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) eidx = rr_handle_event(dev, prodidx, eidx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) rxindex = rrpriv->cur_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (rxindex != rxlimit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) rx_int(dev, rxlimit, rxindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) txcon = rrpriv->dirty_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (txcsmr != txcon) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) /* Due to occational firmware TX producer/consumer out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * of sync. error need to check entry in ring -kbf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if(rrpriv->tx_skbuff[txcon]){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) struct tx_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) desc = &(rrpriv->tx_ring[txcon]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) skb = rrpriv->tx_skbuff[txcon];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) dev->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) dev->stats.tx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) dma_unmap_single(&rrpriv->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) desc->addr.addrlo, skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) dev_kfree_skb_irq(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) rrpriv->tx_skbuff[txcon] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) desc->size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) set_rraddr(&rrpriv->tx_ring[txcon].addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) desc->mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) txcon = (txcon + 1) % TX_RING_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) } while (txcsmr != txcon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) rrpriv->dirty_tx = txcon;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (rrpriv->tx_full && rr_if_busy(dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) (((rrpriv->info->tx_ctrl.pi + 1) % TX_RING_ENTRIES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) != rrpriv->dirty_tx)){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) rrpriv->tx_full = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) eidx |= ((txcsmr << 8) | (rxlimit << 16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) writel(eidx, ®s->EvtCon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) spin_unlock(&rrpriv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) static inline void rr_raz_tx(struct rr_private *rrpriv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) for (i = 0; i < TX_RING_ENTRIES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) struct sk_buff *skb = rrpriv->tx_skbuff[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) struct tx_desc *desc = &(rrpriv->tx_ring[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) dma_unmap_single(&rrpriv->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) desc->addr.addrlo, skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) desc->size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) set_rraddr(&desc->addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) rrpriv->tx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) static inline void rr_raz_rx(struct rr_private *rrpriv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) for (i = 0; i < RX_RING_ENTRIES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) struct sk_buff *skb = rrpriv->rx_skbuff[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) struct rx_desc *desc = &(rrpriv->rx_ring[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) dma_unmap_single(&rrpriv->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) desc->addr.addrlo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) dev->mtu + HIPPI_HLEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) desc->size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) set_rraddr(&desc->addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) rrpriv->rx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) static void rr_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) struct rr_private *rrpriv = from_timer(rrpriv, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) struct net_device *dev = pci_get_drvdata(rrpriv->pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) struct rr_regs __iomem *regs = rrpriv->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (readl(®s->HostCtrl) & NIC_HALTED){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) printk("%s: Restarting nic\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) memset(rrpriv->rx_ctrl, 0, 256 * sizeof(struct ring_ctrl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) memset(rrpriv->info, 0, sizeof(struct rr_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) rr_raz_tx(rrpriv, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) rr_raz_rx(rrpriv, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if (rr_init1(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) spin_lock_irqsave(&rrpriv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) writel(readl(®s->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) ®s->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) spin_unlock_irqrestore(&rrpriv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) rrpriv->timer.expires = RUN_AT(5*HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) add_timer(&rrpriv->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) static int rr_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) struct rr_private *rrpriv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) struct pci_dev *pdev = rrpriv->pci_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) struct rr_regs __iomem *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) int ecode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) regs = rrpriv->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (rrpriv->fw_rev < 0x00020000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) printk(KERN_WARNING "%s: trying to configure device with "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) "obsolete firmware\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) ecode = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) rrpriv->rx_ctrl = dma_alloc_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 256 * sizeof(struct ring_ctrl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) &dma_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (!rrpriv->rx_ctrl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) ecode = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) rrpriv->rx_ctrl_dma = dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) rrpriv->info = dma_alloc_coherent(&pdev->dev, sizeof(struct rr_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) &dma_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) if (!rrpriv->info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) ecode = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) rrpriv->info_dma = dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) spin_lock_irqsave(&rrpriv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) writel(readl(®s->HostCtrl)|HALT_NIC|RR_CLEAR_INT, ®s->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) readl(®s->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) spin_unlock_irqrestore(&rrpriv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (request_irq(pdev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) dev->name, pdev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) ecode = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if ((ecode = rr_init1(dev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) /* Set the timer to switch to check for link beat and perhaps switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) to an alternate media type. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) timer_setup(&rrpriv->timer, rr_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) rrpriv->timer.expires = RUN_AT(5*HZ); /* 5 sec. watchdog */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) add_timer(&rrpriv->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) return ecode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) spin_lock_irqsave(&rrpriv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) writel(readl(®s->HostCtrl)|HALT_NIC|RR_CLEAR_INT, ®s->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) spin_unlock_irqrestore(&rrpriv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (rrpriv->info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) dma_free_coherent(&pdev->dev, sizeof(struct rr_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) rrpriv->info, rrpriv->info_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) rrpriv->info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) if (rrpriv->rx_ctrl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) dma_free_coherent(&pdev->dev, 256 * sizeof(struct ring_ctrl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) rrpriv->rx_ctrl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) return ecode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) static void rr_dump(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) struct rr_private *rrpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) struct rr_regs __iomem *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) u32 index, cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) short i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) rrpriv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) regs = rrpriv->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) printk("%s: dumping NIC TX rings\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) printk("RxPrd %08x, TxPrd %02x, EvtPrd %08x, TxPi %02x, TxCtrlPi %02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) readl(®s->RxPrd), readl(®s->TxPrd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) readl(®s->EvtPrd), readl(®s->TxPi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) rrpriv->info->tx_ctrl.pi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) printk("Error code 0x%x\n", readl(®s->Fail1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) index = (((readl(®s->EvtPrd) >> 8) & 0xff) - 1) % TX_RING_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) cons = rrpriv->dirty_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) printk("TX ring index %i, TX consumer %i\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) index, cons);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (rrpriv->tx_skbuff[index]){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) len = min_t(int, 0x80, rrpriv->tx_skbuff[index]->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) printk("skbuff for index %i is valid - dumping data (0x%x bytes - DMA len 0x%x)\n", index, len, rrpriv->tx_ring[index].size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) for (i = 0; i < len; i++){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) if (!(i & 7))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) printk("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) printk("%02x ", (unsigned char) rrpriv->tx_skbuff[index]->data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) printk("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (rrpriv->tx_skbuff[cons]){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) len = min_t(int, 0x80, rrpriv->tx_skbuff[cons]->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) printk("skbuff for cons %i is valid - dumping data (0x%x bytes - skbuff len 0x%x)\n", cons, len, rrpriv->tx_skbuff[cons]->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) printk("mode 0x%x, size 0x%x,\n phys %08Lx, skbuff-addr %p, truesize 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) rrpriv->tx_ring[cons].mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) rrpriv->tx_ring[cons].size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) (unsigned long long) rrpriv->tx_ring[cons].addr.addrlo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) rrpriv->tx_skbuff[cons]->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) (unsigned int)rrpriv->tx_skbuff[cons]->truesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) for (i = 0; i < len; i++){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) if (!(i & 7))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) printk("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) printk("%02x ", (unsigned char)rrpriv->tx_ring[cons].size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) printk("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) printk("dumping TX ring info:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) for (i = 0; i < TX_RING_ENTRIES; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) printk("mode 0x%x, size 0x%x, phys-addr %08Lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) rrpriv->tx_ring[i].mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) rrpriv->tx_ring[i].size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) (unsigned long long) rrpriv->tx_ring[i].addr.addrlo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) static int rr_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) struct rr_private *rrpriv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) struct rr_regs __iomem *regs = rrpriv->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) struct pci_dev *pdev = rrpriv->pci_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) short i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) * Lock to make sure we are not cleaning up while another CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) * is handling interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) spin_lock_irqsave(&rrpriv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) tmp = readl(®s->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) if (tmp & NIC_HALTED){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) printk("%s: NIC already halted\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) rr_dump(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }else{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) tmp |= HALT_NIC | RR_CLEAR_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) writel(tmp, ®s->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) readl(®s->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) rrpriv->fw_running = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) del_timer_sync(&rrpriv->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) writel(0, ®s->TxPi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) writel(0, ®s->IpRxPi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) writel(0, ®s->EvtCon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) writel(0, ®s->EvtPrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) for (i = 0; i < CMD_RING_ENTRIES; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) writel(0, ®s->CmdRing[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) rrpriv->info->tx_ctrl.entries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) rrpriv->info->cmd_ctrl.pi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) rrpriv->info->evt_ctrl.pi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) rrpriv->rx_ctrl[4].entries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) rr_raz_tx(rrpriv, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) rr_raz_rx(rrpriv, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) dma_free_coherent(&pdev->dev, 256 * sizeof(struct ring_ctrl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) rrpriv->rx_ctrl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) dma_free_coherent(&pdev->dev, sizeof(struct rr_info), rrpriv->info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) rrpriv->info_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) rrpriv->info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) spin_unlock_irqrestore(&rrpriv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) free_irq(pdev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) static netdev_tx_t rr_start_xmit(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) struct rr_private *rrpriv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) struct rr_regs __iomem *regs = rrpriv->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) struct ring_ctrl *txctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) u32 index, len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) u32 *ifield;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) struct sk_buff *new_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (readl(®s->Mode) & FATAL_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) printk("error codes Fail1 %02x, Fail2 %02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) readl(®s->Fail1), readl(®s->Fail2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) * We probably need to deal with tbusy here to prevent overruns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if (skb_headroom(skb) < 8){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) printk("incoming skb too small - reallocating\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) if (!(new_skb = dev_alloc_skb(len + 8))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) skb_reserve(new_skb, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) skb_put(new_skb, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) skb_copy_from_linear_data(skb, new_skb->data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) skb = new_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) ifield = skb_push(skb, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) ifield[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) ifield[1] = hcb->ifield;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) * We don't need the lock before we are actually going to start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) * fiddling with the control blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) spin_lock_irqsave(&rrpriv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) txctrl = &rrpriv->info->tx_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) index = txctrl->pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) rrpriv->tx_skbuff[index] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) set_rraddr(&rrpriv->tx_ring[index].addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) dma_map_single(&rrpriv->pci_dev->dev, skb->data, len + 8, DMA_TO_DEVICE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) rrpriv->tx_ring[index].size = len + 8; /* include IFIELD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) rrpriv->tx_ring[index].mode = PACKET_START | PACKET_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) txctrl->pi = (index + 1) % TX_RING_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) writel(txctrl->pi, ®s->TxPi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) if (txctrl->pi == rrpriv->dirty_tx){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) rrpriv->tx_full = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) spin_unlock_irqrestore(&rrpriv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
/*
 * Read the firmware out of the EEPROM and put it into the SRAM
 * (or from user space - later)
 *
 * This operation requires the NIC to be halted and is performed with
 * interrupts disabled and with the spinlock hold.
 *
 * NOTE(review): the error paths below (invalid eptr, unknown header
 * format) "goto out" and still return 0, so callers cannot detect a
 * bad EEPROM image from the return value — confirm before relying on
 * the result.
 */
static int rr_load_firmware(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	size_t eptr, segptr;
	int i, j;
	u32 localctrl, sptr, len, tmp;
	u32 p2len, p2size, nr_seg, revision, io, sram_size;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	/* Refuse to touch the SRAM while the interface is up. */
	if (dev->flags & IFF_UP)
		return -EBUSY;

	/* The NIC must be halted before its SRAM may be rewritten. */
	if (!(readl(&regs->HostCtrl) & NIC_HALTED)){
		printk("%s: Trying to load firmware to a running NIC.\n",
		       dev->name);
		return -EBUSY;
	}

	/* Quiesce the local processor and reset the producer indices;
	 * LocalCtrl is restored at "out:". */
	localctrl = readl(&regs->LocalCtrl);
	writel(0, &regs->LocalCtrl);

	writel(0, &regs->EvtPrd);
	writel(0, &regs->RxPrd);
	writel(0, &regs->TxPrd);

	/*
	 * First wipe the entire SRAM, otherwise we might run into all
	 * kinds of trouble ... sigh, this took almost all afternoon
	 * to track down ;-(
	 */
	io = readl(&regs->ExtIo);
	writel(0, &regs->ExtIo);
	/* SRAM size (bytes) lives at EEPROM offset 8. */
	sram_size = rr_read_eeprom_word(rrpriv, 8);

	/* Zero SRAM word by word through the window registers.
	 * NOTE(review): the wipe starts at word 200 — presumably the
	 * first 800 bytes are reserved; confirm against the hardware
	 * documentation. */
	for (i = 200; i < sram_size / 4; i++){
		writel(i * 4, &regs->WinBase);
		mb();
		writel(0, &regs->WinData);
		mb();
	}
	writel(io, &regs->ExtIo);
	mb();

	/* Locate the run-code segment directory in the EEPROM.  The
	 * "(x & 0x1fffff) >> 3" decode converts the stored address into
	 * an EEPROM offset — assumed per the EEPROM layout; confirm. */
	eptr = rr_read_eeprom_word(rrpriv,
		offsetof(struct eeprom, rncd_info.AddrRunCodeSegs));
	eptr = ((eptr & 0x1fffff) >> 3);

	/* Phase-2 region bounds, used to sanity-check eptr below. */
	p2len = rr_read_eeprom_word(rrpriv, 0x83*4);
	p2len = (p2len << 2);
	p2size = rr_read_eeprom_word(rrpriv, 0x84*4);
	p2size = ((p2size & 0x1fffff) >> 3);

	if ((eptr < p2size) || (eptr > (p2size + p2len))){
		printk("%s: eptr is invalid\n", dev->name);
		goto out;
	}

	/* Only header format revision 1 is understood. */
	revision = rr_read_eeprom_word(rrpriv,
		offsetof(struct eeprom, manf.HeaderFmt));

	if (revision != 1){
		printk("%s: invalid firmware format (%i)\n",
		       dev->name, revision);
		goto out;
	}

	nr_seg = rr_read_eeprom_word(rrpriv, eptr);
	eptr +=4;
#if (DEBUG > 1)
	printk("%s: nr_seg %i\n", dev->name, nr_seg);
#endif

	/* Each directory entry: SRAM load address, length in words,
	 * and the EEPROM location of the segment data.  Copy every
	 * segment word-by-word into SRAM via the window registers. */
	for (i = 0; i < nr_seg; i++){
		sptr = rr_read_eeprom_word(rrpriv, eptr);
		eptr += 4;
		len = rr_read_eeprom_word(rrpriv, eptr);
		eptr += 4;
		segptr = rr_read_eeprom_word(rrpriv, eptr);
		segptr = ((segptr & 0x1fffff) >> 3);
		eptr += 4;
#if (DEBUG > 1)
		printk("%s: segment %i, sram address %06x, length %04x, segptr %06x\n",
		       dev->name, i, sptr, len, segptr);
#endif
		for (j = 0; j < len; j++){
			tmp = rr_read_eeprom_word(rrpriv, segptr);
			writel(sptr, &regs->WinBase);
			mb();
			writel(tmp, &regs->WinData);
			mb();
			segptr += 4;
			sptr += 4;
		}
	}

out:
	/* Restore the saved LocalCtrl state on every exit path. */
	writel(localctrl, &regs->LocalCtrl);
	mb();
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
/*
 * Private ioctls for the RoadRunner:
 *   SIOCRRGFW - copy the EEPROM firmware image to user space,
 *   SIOCRRPFW - reflash the EEPROM from a user-supplied image and
 *               verify it by reading it back,
 *   SIOCRRID  - report the board magic id.
 * Both firmware operations require CAP_SYS_RAWIO and are refused
 * while firmware is running on the NIC.
 */
static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rr_private *rrpriv;
	unsigned char *image, *oldimage;
	unsigned long flags;
	unsigned int i;
	int error = -EOPNOTSUPP;

	rrpriv = netdev_priv(dev);

	switch(cmd){
	case SIOCRRGFW:
		if (!capable(CAP_SYS_RAWIO)){
			return -EPERM;
		}

		/* Buffer for the whole image; EEPROM_WORDS * sizeof(u32)
		 * is assumed to equal EEPROM_BYTES — TODO confirm. */
		image = kmalloc_array(EEPROM_WORDS, sizeof(u32), GFP_KERNEL);
		if (!image)
			return -ENOMEM;

		if (rrpriv->fw_running){
			printk("%s: Firmware already running\n", dev->name);
			error = -EPERM;
			goto gf_out;
		}

		/* NOTE(review): the full EEPROM read runs with the lock
		 * held and irqs off — presumably acceptable for this
		 * rare maintenance path; confirm. */
		spin_lock_irqsave(&rrpriv->lock, flags);
		i = rr_read_eeprom(rrpriv, 0, image, EEPROM_BYTES);
		spin_unlock_irqrestore(&rrpriv->lock, flags);
		if (i != EEPROM_BYTES){
			printk(KERN_ERR "%s: Error reading EEPROM\n",
			       dev->name);
			error = -EFAULT;
			goto gf_out;
		}
		/* copy_to_user() returns the number of bytes NOT copied;
		 * any nonzero result is mapped to -EFAULT. */
		error = copy_to_user(rq->ifr_data, image, EEPROM_BYTES);
		if (error)
			error = -EFAULT;
	gf_out:
		kfree(image);
		return error;

	case SIOCRRPFW:
		if (!capable(CAP_SYS_RAWIO)){
			return -EPERM;
		}

		/* Pull the new image from user space. */
		image = memdup_user(rq->ifr_data, EEPROM_BYTES);
		if (IS_ERR(image))
			return PTR_ERR(image);

		/* Scratch buffer for the read-back verification. */
		oldimage = kmalloc(EEPROM_BYTES, GFP_KERNEL);
		if (!oldimage) {
			kfree(image);
			return -ENOMEM;
		}

		if (rrpriv->fw_running){
			printk("%s: Firmware already running\n", dev->name);
			error = -EPERM;
			goto wf_out;
		}

		printk("%s: Updating EEPROM firmware\n", dev->name);

		/* Write then read back under the same lock so nothing
		 * else touches the EEPROM in between. */
		spin_lock_irqsave(&rrpriv->lock, flags);
		error = write_eeprom(rrpriv, 0, image, EEPROM_BYTES);
		if (error)
			printk(KERN_ERR "%s: Error writing EEPROM\n",
			       dev->name);

		i = rr_read_eeprom(rrpriv, 0, oldimage, EEPROM_BYTES);
		spin_unlock_irqrestore(&rrpriv->lock, flags);

		if (i != EEPROM_BYTES)
			printk(KERN_ERR "%s: Error reading back EEPROM "
			       "image\n", dev->name);

		/* The memcmp verification result supersedes any earlier
		 * write_eeprom() error code. */
		error = memcmp(image, oldimage, EEPROM_BYTES);
		if (error){
			printk(KERN_ERR "%s: Error verifying EEPROM image\n",
			       dev->name);
			error = -EFAULT;
		}
	wf_out:
		kfree(oldimage);
		kfree(image);
		return error;

	case SIOCRRID:
		/* Board magic: "RR02" in ASCII. */
		return put_user(0x52523032, (int __user *)rq->ifr_data);
	default:
		return error;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) static const struct pci_device_id rr_pci_tbl[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) { PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) PCI_ANY_ID, PCI_ANY_ID, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) { 0,}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) MODULE_DEVICE_TABLE(pci, rr_pci_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
/* PCI driver glue; probe/remove handlers are defined earlier in this
 * file.  module_pci_driver() generates the module init/exit pair. */
static struct pci_driver rr_driver = {
	.name = "rrunner",
	.id_table = rr_pci_tbl,
	.probe = rr_init_one,
	.remove = rr_remove_one,
};

module_pci_driver(rr_driver);