// SPDX-License-Identifier: GPL-2.0-only
/*
 * Network device driver for the MACE ethernet controller on
 * Apple Powermacs.  Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1996 Paul Mackerras.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <linux/bitrev.h>
#include <linux/slab.h>
#include <linux/pgtable.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/macio.h>

#include "mace.h"

static int port_aaui = -1;

#define N_RX_RING	8
#define N_TX_RING	6
#define MAX_TX_ACTIVE	1
#define NCMDS_TX	1	/* dma commands per element in tx ring */
#define RX_BUFLEN	(ETH_FRAME_LEN + 8)
#define TX_TIMEOUT	HZ	/* 1 second */

/*
 * Chip revision that needs a workaround when changing the hardware
 * or multicast address.
 */
#define BROKEN_ADDRCHG_REV	0x0941

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80
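
/*
 * Per-device state.  The rx and tx rings use a fill/empty index pair:
 * *_fill is the next slot to be given a buffer, *_empty is the next
 * slot to be reclaimed; a ring is empty when the two are equal.
 */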
struct mace_data {
	volatile struct mace __iomem *mace;
	volatile struct dbdma_regs __iomem *tx_dma;
	int tx_dma_intr;
	volatile struct dbdma_regs __iomem *rx_dma;
	int rx_dma_intr;
	volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
	volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
	struct sk_buff *rx_bufs[N_RX_RING];
	int rx_fill;
	int rx_empty;
	struct sk_buff *tx_bufs[N_TX_RING];
	int tx_fill;
	int tx_empty;
	unsigned char maccc;
	unsigned char tx_fullup;
	unsigned char tx_active;
	unsigned char tx_bad_runt;
	struct timer_list tx_timeout;
	int timeout_active;
	int port_aaui;
	int chipid;
	struct macio_dev *mdev;
	spinlock_t lock;
};

/*
 * Number of bytes of private data per MACE: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES	(sizeof(struct mace_data) \
	+ (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))

static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static void mace_reset(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_txdma_intr(int irq, void *dev_id);
static irqreturn_t mace_rxdma_intr(int irq, void *dev_id);
static void mace_set_timeout(struct net_device *dev);
static void mace_tx_timeout(struct timer_list *t);
static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma);
static inline void mace_clean_rings(struct mace_data *mp);
static void __mace_set_address(struct net_device *dev, void *addr);

/*
 * If we can't get a skbuff when we need it, we use this area for DMA.
 */
static unsigned char *dummy_buf;

static const struct net_device_ops mace_netdev_ops = {
	.ndo_open		= mace_open,
	.ndo_stop		= mace_close,
	.ndo_start_xmit		= mace_xmit_start,
	.ndo_set_rx_mode	= mace_set_multicast,
	.ndo_set_mac_address	= mace_set_address,
	.ndo_validate_addr	= eth_validate_addr,
};

static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
	struct device_node *mace = macio_get_of_node(mdev);
	struct net_device *dev;
	struct mace_data *mp;
	const unsigned char *addr;
	int j, rev, rc = -EBUSY;

	if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
		printk(KERN_ERR "can't use MACE %pOF: need 3 addrs and 3 irqs\n",
		       mace);
		return -ENODEV;
	}

	addr = of_get_property(mace, "mac-address", NULL);
	if (addr == NULL) {
		addr = of_get_property(mace, "local-mac-address", NULL);
		if (addr == NULL) {
			printk(KERN_ERR "Can't get mac-address for MACE %pOF\n",
			       mace);
			return -ENODEV;
		}
	}

	/*
	 * Lazily allocate the driver-wide dummy buffer.  (Note that we
	 * never have more than one MACE in the system anyway.)
	 */
	if (dummy_buf == NULL) {
		dummy_buf = kmalloc(RX_BUFLEN + 2, GFP_KERNEL);
		if (dummy_buf == NULL)
			return -ENOMEM;
	}

	if (macio_request_resources(mdev, "mace")) {
		printk(KERN_ERR "MACE: can't request IO resources !\n");
		return -EBUSY;
	}

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev) {
		rc = -ENOMEM;
		goto err_release;
	}
	SET_NETDEV_DEV(dev, &mdev->ofdev.dev);

	mp = netdev_priv(dev);
	mp->mdev = mdev;
	macio_set_drvdata(mdev, dev);

	dev->base_addr = macio_resource_start(mdev, 0);
	mp->mace = ioremap(dev->base_addr, 0x1000);
	if (mp->mace == NULL) {
		printk(KERN_ERR "MACE: can't map IO resources !\n");
		rc = -ENOMEM;
		goto err_free;
	}
	dev->irq = macio_irq(mdev, 0);
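
	/*
	 * The firmware on some machines apparently stores the MAC
	 * address with the bits of each byte reversed; such addresses
	 * begin 00:A0 (the bit-reverse of Apple's 00:05 prefix), so
	 * detect and undo the reversal here.
	 */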
	rev = addr[0] == 0 && addr[1] == 0xA0;
	for (j = 0; j < 6; ++j) {
		dev->dev_addr[j] = rev ? bitrev8(addr[j]) : addr[j];
	}
	mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
			in_8(&mp->mace->chipid_lo);

	mp->maccc = ENXMT | ENRCV;

	mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
	if (mp->tx_dma == NULL) {
		printk(KERN_ERR "MACE: can't map TX DMA resources !\n");
		rc = -ENOMEM;
		goto err_unmap_io;
	}
	mp->tx_dma_intr = macio_irq(mdev, 1);

	mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000);
	if (mp->rx_dma == NULL) {
		printk(KERN_ERR "MACE: can't map RX DMA resources !\n");
		rc = -ENOMEM;
		goto err_unmap_tx_dma;
	}
	mp->rx_dma_intr = macio_irq(mdev, 2);
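
	/*
	 * Carve the DBDMA command lists out of the space allocated past
	 * the private data: one command per tx ring slot plus a branch,
	 * followed by the rx commands.  DBDMA_ALIGN puts them on the
	 * 16-byte boundary the hardware requires.
	 */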
	mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1);
	mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1;

	memset((char *) mp->tx_cmds, 0,
	       (NCMDS_TX * N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd));
	timer_setup(&mp->tx_timeout, mace_tx_timeout, 0);
	spin_lock_init(&mp->lock);
	mp->timeout_active = 0;

	if (port_aaui >= 0)
		mp->port_aaui = port_aaui;
	else {
		/* Apple Network Server uses the AAUI port */
		if (of_machine_is_compatible("AAPL,ShinerESB"))
			mp->port_aaui = 1;
		else {
#ifdef CONFIG_MACE_AAUI_PORT
			mp->port_aaui = 1;
#else
			mp->port_aaui = 0;
#endif
		}
	}

	dev->netdev_ops = &mace_netdev_ops;

	/*
	 * Most of what is below could be moved to mace_open()
	 */
	mace_reset(dev);

	rc = request_irq(dev->irq, mace_interrupt, 0, "MACE", dev);
	if (rc) {
		printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq);
		goto err_unmap_rx_dma;
	}
	rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev);
	if (rc) {
		printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr);
		goto err_free_irq;
	}
	rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev);
	if (rc) {
		printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr);
		goto err_free_tx_irq;
	}

	rc = register_netdev(dev);
	if (rc) {
		printk(KERN_ERR "MACE: Cannot register net device, aborting.\n");
		goto err_free_rx_irq;
	}

	printk(KERN_INFO "%s: MACE at %pM, chip revision %d.%d\n",
	       dev->name, dev->dev_addr,
	       mp->chipid >> 8, mp->chipid & 0xff);

	return 0;

err_free_rx_irq:
	free_irq(macio_irq(mdev, 2), dev);
err_free_tx_irq:
	free_irq(macio_irq(mdev, 1), dev);
err_free_irq:
	free_irq(macio_irq(mdev, 0), dev);
err_unmap_rx_dma:
	iounmap(mp->rx_dma);
err_unmap_tx_dma:
	iounmap(mp->tx_dma);
err_unmap_io:
	iounmap(mp->mace);
err_free:
	free_netdev(dev);
err_release:
	macio_release_resources(mdev);

	return rc;
}

static int mace_remove(struct macio_dev *mdev)
{
	struct net_device *dev = macio_get_drvdata(mdev);
	struct mace_data *mp;

	BUG_ON(dev == NULL);

	macio_set_drvdata(mdev, NULL);

	mp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(mp->tx_dma_intr, dev);
	free_irq(mp->rx_dma_intr, dev);

	iounmap(mp->rx_dma);
	iounmap(mp->tx_dma);
	iounmap(mp->mace);

	free_netdev(dev);

	macio_release_resources(mdev);

	return 0;
}

static void dbdma_reset(volatile struct dbdma_regs __iomem *dma)
{
	int i;

	out_le32(&dma->control, (WAKE|FLUSH|PAUSE|RUN) << 16);

	/*
	 * Yes this looks peculiar, but apparently it needs to be this
	 * way on some machines.
	 */
	for (i = 200; i > 0; --i)
		if (le32_to_cpu(dma->control) & RUN)
			udelay(1);
}

static void mace_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	int i;

	/* soft-reset the chip */
	i = 200;
	while (--i) {
		out_8(&mb->biucc, SWRST);
		if (in_8(&mb->biucc) & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}
	if (!i) {
		printk(KERN_ERR "mace: cannot reset chip!\n");
		return;
	}

	out_8(&mb->imr, 0xff);	/* disable all intrs for now */
	i = in_8(&mb->ir);
	out_8(&mb->maccc, 0);	/* turn off tx, rx */

	out_8(&mb->biucc, XMTSP_64);
	out_8(&mb->utr, RTRD);
	out_8(&mb->fifocc, RCVFW_32 | XMTFW_16 | XMTFWU | RCVFWU | XMTBRST);
	out_8(&mb->xmtfc, AUTO_PAD_XMIT);	/* auto-pad short frames */
	out_8(&mb->rcvfc, 0);

	/* load up the hardware address */
	__mace_set_address(dev, dev->dev_addr);

	/* clear the multicast filter */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, LOGADDR);
	else {
		out_8(&mb->iac, ADDRCHG | LOGADDR);
		while ((in_8(&mb->iac) & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 8; ++i)
		out_8(&mb->ladrf, 0);

	/* done changing address */
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, 0);

	if (mp->port_aaui)
		out_8(&mb->plscc, PORTSEL_AUI + ENPLSIO);
	else
		out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
}

static void __mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	unsigned char *p = addr;
	int i;

	/* load up the hardware address */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, PHYADDR);
	else {
		out_8(&mb->iac, ADDRCHG | PHYADDR);
		while ((in_8(&mb->iac) & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 6; ++i)
		out_8(&mb->padr, dev->dev_addr[i] = p[i]);
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, 0);
}

static int mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);

	__mace_set_address(dev, addr);

	/* note: setting ADDRCHG clears ENRCV */
	out_8(&mb->maccc, mp->maccc);

	spin_unlock_irqrestore(&mp->lock, flags);
	return 0;
}

static inline void mace_clean_rings(struct mace_data *mp)
{
	int i;

	/* free some skb's */
	for (i = 0; i < N_RX_RING; ++i) {
		if (mp->rx_bufs[i] != NULL) {
			dev_kfree_skb(mp->rx_bufs[i]);
			mp->rx_bufs[i] = NULL;
		}
	}
	for (i = mp->tx_empty; i != mp->tx_fill; ) {
		dev_kfree_skb(mp->tx_bufs[i]);
		if (++i >= N_TX_RING)
			i = 0;
	}
}

static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;
	volatile struct dbdma_cmd *cp;
	int i;
	struct sk_buff *skb;
	unsigned char *data;

	/* reset the chip */
	mace_reset(dev);

	/* initialize list of sk_buffs for receiving and set up recv dma */
	mace_clean_rings(mp);
	memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
	cp = mp->rx_cmds;
	for (i = 0; i < N_RX_RING - 1; ++i) {
		skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
		if (!skb) {
			data = dummy_buf;
		} else {
			skb_reserve(skb, 2);	/* so IP header lands on 4-byte bdry */
			data = skb->data;
		}
		mp->rx_bufs[i] = skb;
		cp->req_count = cpu_to_le16(RX_BUFLEN);
		cp->command = cpu_to_le16(INPUT_LAST + INTR_ALWAYS);
		cp->phy_addr = cpu_to_le32(virt_to_bus(data));
		cp->xfer_status = 0;
		++cp;
	}
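	/*
	 * The last rx ring slot is deliberately left without a buffer:
	 * the STOP command below parks the channel there until the rx
	 * interrupt handler refills the ring, so rx_fill never catches
	 * up with rx_empty.
	 */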
	mp->rx_bufs[i] = NULL;
	cp->command = cpu_to_le16(DBDMA_STOP);
	mp->rx_fill = i;
	mp->rx_empty = 0;

	/* Put a branch back to the beginning of the receive command list */
	++cp;
	cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS);
	cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->rx_cmds));

	/* start rx dma */
	out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
	out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds));
	out_le32(&rd->control, (RUN << 16) | RUN);

	/* put a branch at the end of the tx command list */
	cp = mp->tx_cmds + NCMDS_TX * N_TX_RING;
	cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS);
	cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->tx_cmds));

	/* reset tx dma */
	out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16);
	out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds));
	mp->tx_fill = 0;
	mp->tx_empty = 0;
	mp->tx_fullup = 0;
	mp->tx_active = 0;
	mp->tx_bad_runt = 0;

	/* turn it on! */
	out_8(&mb->maccc, mp->maccc);
	/* enable all interrupts except receive interrupts */
	out_8(&mb->imr, RCVINT);
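	/*
	 * Receive completion is signalled through the rx DMA channel's
	 * own interrupt (mace_rxdma_intr), so RCVINT stays masked here.
	 */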

	return 0;
}

static int mace_close(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;

	/* disable rx and tx */
	out_8(&mb->maccc, 0);
	out_8(&mb->imr, 0xff);	/* disable all intrs */

	/* disable rx and tx dma */
	rd->control = cpu_to_le32((RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
	td->control = cpu_to_le32((RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */

	mace_clean_rings(mp);

	return 0;
}

static inline void mace_set_timeout(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);

	if (mp->timeout_active)
		del_timer(&mp->tx_timeout);
	mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
	add_timer(&mp->tx_timeout);
	mp->timeout_active = 1;
}

static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;
	volatile struct dbdma_cmd *cp, *np;
	unsigned long flags;
	int fill, next, len;

	/* see if there's a free slot in the tx ring */
	spin_lock_irqsave(&mp->lock, flags);
	fill = mp->tx_fill;
	next = fill + 1;
	if (next >= N_TX_RING)
		next = 0;
	if (next == mp->tx_empty) {
		netif_stop_queue(dev);
		mp->tx_fullup = 1;
		spin_unlock_irqrestore(&mp->lock, flags);
		return NETDEV_TX_BUSY;	/* can't take it at the moment */
	}
	spin_unlock_irqrestore(&mp->lock, flags);

	/* partially fill in the dma command block */
	len = skb->len;
	if (len > ETH_FRAME_LEN) {
		printk(KERN_DEBUG "mace: xmit frame too long (%d)\n", len);
		len = ETH_FRAME_LEN;
	}
	mp->tx_bufs[fill] = skb;
	cp = mp->tx_cmds + NCMDS_TX * fill;
	cp->req_count = cpu_to_le16(len);
	cp->phy_addr = cpu_to_le32(virt_to_bus(skb->data));
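
	/*
	 * Write a STOP into the following ring slot first, so that once
	 * this command becomes OUTPUT_LAST below the channel pauses
	 * there instead of running off the end of the ring.
	 */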
	np = mp->tx_cmds + NCMDS_TX * next;
	out_le16(&np->command, DBDMA_STOP);

	/* poke the tx dma channel */
	spin_lock_irqsave(&mp->lock, flags);
	mp->tx_fill = next;
	if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) {
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
		++mp->tx_active;
		mace_set_timeout(dev);
	}
	if (++next >= N_TX_RING)
		next = 0;
	if (next == mp->tx_empty)
		netif_stop_queue(dev);
	spin_unlock_irqrestore(&mp->lock, flags);

	return NETDEV_TX_OK;
}

static void mace_set_multicast(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	int i;
	u32 crc;
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);
	mp->maccc &= ~PROM;
	if (dev->flags & IFF_PROMISC) {
		mp->maccc |= PROM;
	} else {
		unsigned char multicast_filter[8];
		struct netdev_hw_addr *ha;

		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0xff;
		} else {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0;
			netdev_for_each_mc_addr(ha, dev) {
				crc = ether_crc_le(6, ha->addr);
				i = crc >> 26;	/* bit number in multicast_filter */
				multicast_filter[i >> 3] |= 1 << (i & 7);
			}
		}
#if 0
		printk("Multicast filter :");
		for (i = 0; i < 8; i++)
			printk("%02x ", multicast_filter[i]);
		printk("\n");
#endif

		if (mp->chipid == BROKEN_ADDRCHG_REV)
			out_8(&mb->iac, LOGADDR);
		else {
			out_8(&mb->iac, ADDRCHG | LOGADDR);
			while ((in_8(&mb->iac) & ADDRCHG) != 0)
				;
		}
		for (i = 0; i < 8; ++i)
			out_8(&mb->ladrf, multicast_filter[i]);
		if (mp->chipid != BROKEN_ADDRCHG_REV)
			out_8(&mb->iac, 0);
	}
	/* reset maccc */
	out_8(&mb->maccc, mp->maccc);
	spin_unlock_irqrestore(&mp->lock, flags);
}

static void mace_handle_misc_intrs(struct mace_data *mp, int intr, struct net_device *dev)
{
	volatile struct mace __iomem *mb = mp->mace;
	static int mace_babbles, mace_jabbers;

	if (intr & MPCO)
		dev->stats.rx_missed_errors += 256;
	dev->stats.rx_missed_errors += in_8(&mb->mpc);	/* reading clears it */
	if (intr & RNTPCO)
		dev->stats.rx_length_errors += 256;
	dev->stats.rx_length_errors += in_8(&mb->rntpc);	/* reading clears it */
	if (intr & CERR)
		++dev->stats.tx_heartbeat_errors;
	if (intr & BABBLE)
		if (mace_babbles++ < 4)
			printk(KERN_DEBUG "mace: babbling transmitter\n");
	if (intr & JABBER)
		if (mace_jabbers++ < 4)
			printk(KERN_DEBUG "mace: jabbering transceiver\n");
}

static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;
	volatile struct dbdma_cmd *cp;
	int intr, fs, i, stat, x;
	int xcount, dstat;
	unsigned long flags;
	/* static int mace_last_fs, mace_last_xcount; */

	spin_lock_irqsave(&mp->lock, flags);
	intr = in_8(&mb->ir);	/* read interrupt register */
	in_8(&mb->xmtrc);	/* get retries */
	mace_handle_misc_intrs(mp, intr, dev);

	i = mp->tx_empty;
	while (in_8(&mb->pr) & XMTSV) {
		del_timer(&mp->tx_timeout);
		mp->timeout_active = 0;
		/*
		 * Clear any interrupt indication associated with this status
		 * word.  This appears to unlatch any error indication from
		 * the DMA controller.
		 */
		intr = in_8(&mb->ir);
		if (intr != 0)
			mace_handle_misc_intrs(mp, intr, dev);
		if (mp->tx_bad_runt) {
			fs = in_8(&mb->xmtfs);
			mp->tx_bad_runt = 0;
			out_8(&mb->xmtfc, AUTO_PAD_XMIT);
			continue;
		}
		dstat = le32_to_cpu(td->status);
		/* stop DMA controller */
		out_le32(&td->control, RUN << 16);
		/*
		 * xcount is the number of complete frames which have been
		 * written to the fifo but for which status has not been read.
		 */
		xcount = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
		if (xcount == 0 || (dstat & DEAD)) {
			/*
			 * If a packet was aborted before the DMA controller has
			 * finished transferring it, it seems that there are 2 bytes
			 * which are stuck in some buffer somewhere.  These will get
			 * transmitted as soon as we read the frame status (which
			 * reenables the transmit data transfer request).  Turning
			 * off the DMA controller and/or resetting the MACE doesn't
			 * help.  So we disable auto-padding and FCS transmission
			 * so the two bytes will only be a runt packet which should
			 * be ignored by other stations.
			 */
			out_8(&mb->xmtfc, DXMTFCS);
		}
		fs = in_8(&mb->xmtfs);
		if ((fs & XMTSV) == 0) {
			printk(KERN_ERR "mace: xmtfs not valid! (fs=%x xc=%d ds=%x)\n",
			       fs, xcount, dstat);
			mace_reset(dev);
			/*
			 * XXX mace likes to hang the machine after a xmtfs error.
			 * This is hard to reproduce, resetting *may* help
			 */
		}
		cp = mp->tx_cmds + NCMDS_TX * i;
		stat = le16_to_cpu(cp->xfer_status);
		if ((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount == 0) {
			/*
			 * Check whether there were in fact 2 bytes written to
			 * the transmit FIFO.
			 */
			udelay(1);
			x = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
			if (x != 0) {
				/* there were two bytes with an end-of-packet indication */
				mp->tx_bad_runt = 1;
				mace_set_timeout(dev);
			} else {
				/*
				 * Either there weren't the two bytes buffered up, or they
				 * didn't have an end-of-packet indication.
				 * We flush the transmit FIFO just in case (by setting the
				 * XMTFWU bit with the transmitter disabled).
				 */
				out_8(&mb->maccc, in_8(&mb->maccc) & ~ENXMT);
				out_8(&mb->fifocc, in_8(&mb->fifocc) | XMTFWU);
				udelay(1);
				out_8(&mb->maccc, in_8(&mb->maccc) | ENXMT);
				out_8(&mb->xmtfc, AUTO_PAD_XMIT);
			}
		}
		/* dma should have finished */
		if (i == mp->tx_fill) {
			printk(KERN_DEBUG "mace: tx ring ran out? (fs=%x xc=%d ds=%x)\n",
			       fs, xcount, dstat);
			continue;
		}
		/* Update stats */
		if (fs & (UFLO|LCOL|LCAR|RTRY)) {
			++dev->stats.tx_errors;
			if (fs & LCAR)
				++dev->stats.tx_carrier_errors;
			if (fs & (UFLO|LCOL|RTRY))
				++dev->stats.tx_aborted_errors;
		} else {
			dev->stats.tx_bytes += mp->tx_bufs[i]->len;
			++dev->stats.tx_packets;
		}
		dev_consume_skb_irq(mp->tx_bufs[i]);
		--mp->tx_active;
		if (++i >= N_TX_RING)
			i = 0;
#if 0
		mace_last_fs = fs;
		mace_last_xcount = xcount;
#endif
	}

	if (i != mp->tx_empty) {
		mp->tx_fullup = 0;
		netif_wake_queue(dev);
	}
	mp->tx_empty = i;
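	/*
	 * Skip over the commands already handed to the DMA channel to
	 * find the first slot that is queued but not yet started.
	 */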
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) i += mp->tx_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) if (i >= N_TX_RING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) i -= N_TX_RING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) /* set up the next one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) cp = mp->tx_cmds + NCMDS_TX * i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) out_le16(&cp->xfer_status, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) out_le16(&cp->command, OUTPUT_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) ++mp->tx_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) if (++i >= N_TX_RING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) } while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) mace_set_timeout(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) spin_unlock_irqrestore(&mp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) }
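
#if 0	/* illustration only - not part of the original driver */
/*
 * A minimal sketch of the descriptor re-arm step done at the end of
 * mace_interrupt() above, pulled out as a hypothetical helper; the
 * helper name is ours, the accessors and flags are the ones used
 * throughout this file.
 */
static void mace_rearm_tx(volatile struct dbdma_cmd *cp,
			  volatile struct dbdma_regs __iomem *td)
{
	out_le16(&cp->xfer_status, 0);		/* forget any stale status */
	out_le16(&cp->command, OUTPUT_LAST);	/* one frame per descriptor */
	/* set RUN|WAKE, with the corresponding mask bits in the high half */
	out_le32(&td->control, ((RUN|WAKE) << 16) | (RUN|WAKE));
}
#endif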
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) static void mace_tx_timeout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) struct mace_data *mp = from_timer(mp, t, tx_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) struct net_device *dev = macio_get_drvdata(mp->mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) volatile struct mace __iomem *mb = mp->mace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) volatile struct dbdma_regs __iomem *td = mp->tx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) volatile struct dbdma_cmd *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) spin_lock_irqsave(&mp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) mp->timeout_active = 0;
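	/* nothing in flight and no runt cleanup pending: the timer is stale */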
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (mp->tx_active == 0 && !mp->tx_bad_runt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) /* update various counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) mace_handle_misc_intrs(mp, in_8(&mb->ir), dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) /* turn off both tx and rx and reset the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) out_8(&mb->maccc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) printk(KERN_ERR "mace: transmit timeout - resetting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) dbdma_reset(td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) mace_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
	/* restart rx dma, picking up at the command the channel was on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) cp = bus_to_virt(le32_to_cpu(rd->cmdptr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) dbdma_reset(rd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) out_le16(&cp->xfer_status, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) out_le32(&rd->cmdptr, virt_to_bus(cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) out_le32(&rd->control, (RUN << 16) | RUN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) /* fix up the transmit side */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) i = mp->tx_empty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) mp->tx_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) ++dev->stats.tx_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (mp->tx_bad_runt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) mp->tx_bad_runt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) } else if (i != mp->tx_fill) {
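		/* drop the frame that was in flight when we timed out */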
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) dev_kfree_skb(mp->tx_bufs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (++i >= N_TX_RING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) mp->tx_empty = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) mp->tx_fullup = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) netif_wake_queue(dev);
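	/* if more frames are queued, restart the channel on the next one */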
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (i != mp->tx_fill) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) cp = mp->tx_cmds + NCMDS_TX * i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) out_le16(&cp->xfer_status, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) out_le16(&cp->command, OUTPUT_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) out_le32(&td->cmdptr, virt_to_bus(cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) out_le32(&td->control, (RUN << 16) | RUN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) ++mp->tx_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) mace_set_timeout(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) /* turn it back on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) out_8(&mb->imr, RCVINT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) out_8(&mb->maccc, mp->maccc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) spin_unlock_irqrestore(&mp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) static irqreturn_t mace_txdma_intr(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) {
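	/*
	 * Transmit completion is handled entirely from the MACE chip
	 * interrupt (mace_interrupt() above), so there is nothing for
	 * the tx DMA interrupt to do.
	 */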
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) struct net_device *dev = (struct net_device *) dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) struct mace_data *mp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) volatile struct dbdma_cmd *cp, *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) int i, nb, stat, next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) unsigned frame_status;
	static int mace_lost_status;	/* debug counter, never reported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) unsigned char *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) spin_lock_irqsave(&mp->lock, flags);
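	/*
	 * First pass: walk the descriptors the channel has completed,
	 * from rx_empty up to (but not including) rx_fill.
	 */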
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) for (i = mp->rx_empty; i != mp->rx_fill; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) cp = mp->rx_cmds + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) stat = le16_to_cpu(cp->xfer_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if ((stat & ACTIVE) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) next = i + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (next >= N_RX_RING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) next = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) np = mp->rx_cmds + next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (next != mp->rx_fill &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) (le16_to_cpu(np->xfer_status) & ACTIVE) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) printk(KERN_DEBUG "mace: lost a status word\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) ++mace_lost_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
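		/* bytes actually transferred: requested minus residual count */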
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) nb = le16_to_cpu(cp->req_count) - le16_to_cpu(cp->res_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) out_le16(&cp->command, DBDMA_STOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) /* got a packet, have a look at it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) skb = mp->rx_bufs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) if (!skb) {
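			/* the earlier refill failed; the data landed in
			 * dummy_buf and is simply dropped */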
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) ++dev->stats.rx_dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) } else if (nb > 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) data = skb->data;
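			/* the MACE appends a receive status word after the
			 * frame data; bytes nb-4/nb-3 form it, with the RS_*
			 * error flags (see mace.h) in the top bits */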
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) frame_status = (data[nb-3] << 8) + data[nb-4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) ++dev->stats.rx_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (frame_status & RS_OFLO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) ++dev->stats.rx_over_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (frame_status & RS_FRAMERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) ++dev->stats.rx_frame_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (frame_status & RS_FCSERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) ++dev->stats.rx_crc_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) } else {
			/* The MACE's AUTO_STRIP_RCV feature is on by default
			 * and strips the FCS from frames with 802.3 (length)
			 * headers but not from Ethernet (type) frames, and
			 * the chip appends 4 octets of receive status to
			 * every frame.  So Ethernet frames carry 8 trailing
			 * octets (FCS + status) and 802.3 frames only 4
			 * (status); trim accordingly. */
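			/* values below 1536 (0x600) in the type/length field
			 * at offset 12 are 802.3 lengths; the raw u16 load
			 * matches network byte order on these big-endian
			 * machines */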
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if (*(unsigned short *)(data+12) < 1536) /* 802.3 header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) nb -= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) else /* Ethernet header; mace includes FCS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) nb -= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) skb_put(skb, nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) skb->protocol = eth_type_trans(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) dev->stats.rx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) mp->rx_bufs[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) ++dev->stats.rx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) ++dev->stats.rx_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) ++dev->stats.rx_length_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) /* advance to next */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (++i >= N_RX_RING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) mp->rx_empty = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
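	/*
	 * Second pass: refill and re-arm descriptors from rx_fill up to
	 * one slot short of rx_empty (one slot is kept unused so a full
	 * ring can be told apart from an empty one).
	 */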
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) i = mp->rx_fill;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) next = i + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (next >= N_RX_RING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) next = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (next == mp->rx_empty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) cp = mp->rx_cmds + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) skb = mp->rx_bufs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) skb_reserve(skb, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) mp->rx_bufs[i] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) cp->req_count = cpu_to_le16(RX_BUFLEN);
		data = skb ? skb->data : dummy_buf;	/* no skb: receive into the scratch buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) cp->phy_addr = cpu_to_le32(virt_to_bus(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) out_le16(&cp->xfer_status, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if ((le32_to_cpu(rd->status) & ACTIVE) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) out_le32(&rd->control, (PAUSE << 16) | PAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) while ((in_le32(&rd->status) & ACTIVE) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) i = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (i != mp->rx_fill) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) out_le32(&rd->control, ((RUN|WAKE) << 16) | (RUN|WAKE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) mp->rx_fill = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) spin_unlock_irqrestore(&mp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
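
#if 0	/* illustration only - not part of the original driver */
/*
 * Sketch of the trailing status check done in mace_rxdma_intr() above,
 * as a standalone helper.  The helper name and the "buf"/"len"
 * parameters are hypothetical; the RS_* masks come from mace.h.
 */
static int mace_rx_frame_ok(const unsigned char *buf, int len)
{
	unsigned int fs = (buf[len - 3] << 8) + buf[len - 4];

	return (fs & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) == 0;
}
#endif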
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) static const struct of_device_id mace_match[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) .name = "mace",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) };
MODULE_DEVICE_TABLE(of, mace_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) static struct macio_driver mace_driver =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) .name = "mace",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) .of_match_table = mace_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) .probe = mace_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) .remove = mace_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) static int __init mace_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) return macio_register_driver(&mace_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) static void __exit mace_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) macio_unregister_driver(&mace_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
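	/* release the shared rx fallback buffer allocated at probe time */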
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) kfree(dummy_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) dummy_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) MODULE_AUTHOR("Paul Mackerras");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) MODULE_DESCRIPTION("PowerMac MACE driver.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) module_param(port_aaui, int, 0);
MODULE_PARM_DESC(port_aaui, "MACE uses AAUI port (0 = twisted pair, 1 = AAUI, -1 = use driver default)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) module_init(mace_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) module_exit(mace_cleanup);
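
/*
 * Usage note (illustrative, not from the original source): port_aaui
 * selects the transceiver port at load time, e.g.
 *
 *	modprobe mace port_aaui=1	# force the AAUI port
 *	modprobe mace port_aaui=0	# force the twisted-pair port
 *
 * Leaving it at the default (-1) falls back to the compile-time
 * choice (CONFIG_MACE_AAUI_PORT).
 */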