// SPDX-License-Identifier: GPL-2.0-only
/*
 * sgiseeq.c: Seeq8003 ethernet driver for SGI machines.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 */

#undef DEBUG

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
#include <asm/sgi/seeq.h>

#include "sgiseeq.h"

static char *sgiseeqstr = "SGI Seeq8003";

/*
 * If you want speed, you do something silly; it has always worked for me. So,
 * with that in mind, I've decided to make this driver look completely like a
 * stupid Lance from a driver architecture perspective. The only difference is
 * that here our "ring buffer" looks and acts like a real Lance one does, but
 * is laid out the way the HPC DMA and the Seeq want it. You'd be surprised
 * how a stupid idea like this can pay off in performance, not to mention
 * making this driver 2,000 times easier to write. ;-)
 */

/* Tune these if we tend to run out often etc. */
#define SEEQ_RX_BUFFERS 16
#define SEEQ_TX_BUFFERS 16

#define PKT_BUF_SZ 1584

#define NEXT_RX(i) (((i) + 1) & (SEEQ_RX_BUFFERS - 1))
#define NEXT_TX(i) (((i) + 1) & (SEEQ_TX_BUFFERS - 1))
#define PREV_RX(i) (((i) - 1) & (SEEQ_RX_BUFFERS - 1))
#define PREV_TX(i) (((i) - 1) & (SEEQ_TX_BUFFERS - 1))

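/*
 * Free TX ring slots.  One slot is always kept unused so that
 * tx_old == tx_new unambiguously means "ring empty" rather than
 * "ring full"; at most SEEQ_TX_BUFFERS - 1 slots are ever reported.
 */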
#define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \
                            sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
                            sp->tx_old - sp->tx_new - 1)

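/*
 * Translate a descriptor's kernel virtual address into the bus address
 * the HPC3 DMA engine expects, as an offset from the start of the
 * descriptor block mapped at srings_dma.
 */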
#define VIRT_TO_DMA(sp, v) ((sp)->srings_dma + \
                            (dma_addr_t)((unsigned long)(v) - \
                                         (unsigned long)((sp)->rx_desc)))

/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;

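/*
 * Each descriptor below is padded out to exactly 128 bytes (HPC DMA
 * descriptor + padding + the skb back-pointer).  The likely intent, an
 * assumption rather than something documented here, is that each
 * descriptor owns its own cache lines so the per-descriptor
 * dma_sync_desc_*() calls never drag a neighbouring descriptor in or
 * out of the cache with it.
 */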
#define PAD_SIZE (128 - sizeof(struct hpc_dma_desc) - sizeof(void *))

struct sgiseeq_rx_desc {
        volatile struct hpc_dma_desc rdma;
        u8 padding[PAD_SIZE];
        struct sk_buff *skb;
};

struct sgiseeq_tx_desc {
        volatile struct hpc_dma_desc tdma;
        u8 padding[PAD_SIZE];
        struct sk_buff *skb;
};

/*
 * Warning: This structure is laid out in a certain way because HPC dma
 * descriptors must be 8-byte aligned.  So don't touch this without
 * some care.
 */
struct sgiseeq_init_block { /* Note the name ;-) */
        struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS];
        struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS];
};

struct sgiseeq_private {
        struct sgiseeq_init_block *srings;
        dma_addr_t srings_dma;

        /* Ptrs to the descriptors in uncached space. */
        struct sgiseeq_rx_desc *rx_desc;
        struct sgiseeq_tx_desc *tx_desc;

        char *name;
        struct hpc3_ethregs *hregs;
        struct sgiseeq_regs *sregs;

        /* Ring entry counters. */
        unsigned int rx_new, tx_new;
        unsigned int rx_old, tx_old;

        int is_edlc;
        unsigned char control;
        unsigned char mode;

        spinlock_t tx_lock;
};

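/*
 * The descriptor block lives in non-coherent DMA memory (see
 * dma_alloc_noncoherent() in sgiseeq_probe()), so every CPU access to a
 * descriptor must be bracketed by explicit syncs.  Both descriptor types
 * have the same size, so syncing sizeof(struct sgiseeq_rx_desc) bytes is
 * correct for TX descriptors as well.
 */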
static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
{
        struct sgiseeq_private *sp = netdev_priv(dev);

        dma_sync_single_for_cpu(dev->dev.parent, VIRT_TO_DMA(sp, addr),
                                sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL);
}

static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
{
        struct sgiseeq_private *sp = netdev_priv(dev);

        dma_sync_single_for_device(dev->dev.parent, VIRT_TO_DMA(sp, addr),
                                   sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL);
}

static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
{
        hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
        udelay(20);
        hregs->reset = 0;
}

static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs,
                                       struct sgiseeq_regs *sregs)
{
        hregs->rx_ctrl = hregs->tx_ctrl = 0;
        hpc3_eth_reset(hregs);
}

#define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \
                       SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC)

static inline void seeq_go(struct sgiseeq_private *sp,
                           struct hpc3_ethregs *hregs,
                           struct sgiseeq_regs *sregs)
{
        sregs->rstat = sp->mode | RSTAT_GO_BITS;
        hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE;
}

static inline void __sgiseeq_set_mac_address(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sgiseeq_regs *sregs = sp->sregs;
        int i;

        sregs->tstat = SEEQ_TCMD_RB0;
        for (i = 0; i < 6; i++)
                sregs->rw.eth_addr[i] = dev->dev_addr[i];
}

static int sgiseeq_set_mac_address(struct net_device *dev, void *addr)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sockaddr *sa = addr;

        memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

        spin_lock_irq(&sp->tx_lock);
        __sgiseeq_set_mac_address(dev);
        spin_unlock_irq(&sp->tx_lock);

        return 0;
}

#define TCNTINFO_INIT (HPCDMA_EOX | HPCDMA_ETXD)
#define RCNTCFG_INIT  (HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE)
#define RCNTINFO_INIT (RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT))

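/*
 * (Re)initialise both rings: every TX descriptor is marked done and
 * end-of-chain (TCNTINFO_INIT) so the HPC treats it as already
 * completed, and every RX descriptor gets a freshly mapped PKT_BUF_SZ
 * receive buffer.  The skb_reserve(skb, 2) keeps the IP header aligned
 * while the mapping deliberately starts at skb->data - 2 so the hardware
 * still sees a full PKT_BUF_SZ buffer.  The last RX descriptor is tagged
 * HPCDMA_EOR to mark the end of the ring for the DMA engine.
 */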
static int seeq_init_ring(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        int i;

        netif_stop_queue(dev);
        sp->rx_new = sp->tx_new = 0;
        sp->rx_old = sp->tx_old = 0;

        __sgiseeq_set_mac_address(dev);

        /* Setup tx ring. */
        for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
                sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
                dma_sync_desc_dev(dev, &sp->tx_desc[i]);
        }

        /* And now the rx ring. */
        for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
                if (!sp->rx_desc[i].skb) {
                        dma_addr_t dma_addr;
                        struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);

                        if (skb == NULL)
                                return -ENOMEM;
                        skb_reserve(skb, 2);
                        dma_addr = dma_map_single(dev->dev.parent,
                                                  skb->data - 2,
                                                  PKT_BUF_SZ, DMA_FROM_DEVICE);
                        sp->rx_desc[i].skb = skb;
                        sp->rx_desc[i].rdma.pbuf = dma_addr;
                }
                sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
                dma_sync_desc_dev(dev, &sp->rx_desc[i]);
        }
        sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
        dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]);
        return 0;
}

static void seeq_purge_ring(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        int i;

        /* clear tx ring. */
        for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
                if (sp->tx_desc[i].skb) {
                        dev_kfree_skb(sp->tx_desc[i].skb);
                        sp->tx_desc[i].skb = NULL;
                }
        }

        /* And now the rx ring. */
        for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
                if (sp->rx_desc[i].skb) {
                        dev_kfree_skb(sp->rx_desc[i].skb);
                        sp->rx_desc[i].skb = NULL;
                }
        }
}

#ifdef DEBUG
static struct sgiseeq_private *gpriv;
static struct net_device *gdev;

static void sgiseeq_dump_rings(void)
{
        static int once;
        struct sgiseeq_rx_desc *r = gpriv->rx_desc;
        struct sgiseeq_tx_desc *t = gpriv->tx_desc;
        struct hpc3_ethregs *hregs = gpriv->hregs;
        int i;

        if (once)
                return;
        once++;
        printk("RING DUMP:\n");
        for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
                printk("RX [%d]: @(%p) [%08x,%08x,%08x] ",
                       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
                       r[i].rdma.pnext);
                i += 1;
                printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
                       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
                       r[i].rdma.pnext);
        }
        for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
                printk("TX [%d]: @(%p) [%08x,%08x,%08x] ",
                       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
                       t[i].tdma.pnext);
                i += 1;
                printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
                       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
                       t[i].tdma.pnext);
        }
        printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n",
               gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old);
        printk("RREGS: rx_cbptr[%08x] rx_ndptr[%08x] rx_ctrl[%08x]\n",
               hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl);
        printk("TREGS: tx_cbptr[%08x] tx_ndptr[%08x] tx_ctrl[%08x]\n",
               hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl);
}
#endif

#define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF)
#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)

static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
                     struct sgiseeq_regs *sregs)
{
        struct hpc3_ethregs *hregs = sp->hregs;
        int err;

        reset_hpc3_and_seeq(hregs, sregs);
        err = seeq_init_ring(dev);
        if (err)
                return err;

        /* Setup to field the proper interrupt types. */
        if (sp->is_edlc) {
                sregs->tstat = TSTAT_INIT_EDLC;
                sregs->rw.wregs.control = sp->control;
                sregs->rw.wregs.frame_gap = 0;
        } else {
                sregs->tstat = TSTAT_INIT_SEEQ;
        }

        hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc);
        hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc);

        seeq_go(sp, hregs, sregs);
        return 0;
}

static void record_rx_errors(struct net_device *dev, unsigned char status)
{
        if (status & SEEQ_RSTAT_OVERF ||
            status & SEEQ_RSTAT_SFRAME)
                dev->stats.rx_over_errors++;
        if (status & SEEQ_RSTAT_CERROR)
                dev->stats.rx_crc_errors++;
        if (status & SEEQ_RSTAT_DERROR)
                dev->stats.rx_frame_errors++;
        if (status & SEEQ_RSTAT_REOF)
                dev->stats.rx_errors++;
}

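/*
 * The HPC3 RX channel can go idle, for instance once it runs out of
 * usable descriptors.  If it has, point it at the descriptor we expect
 * to be filled next and start it again.
 */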
static inline void rx_maybe_restart(struct sgiseeq_private *sp,
                                    struct hpc3_ethregs *hregs,
                                    struct sgiseeq_regs *sregs)
{
        if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) {
                hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc + sp->rx_new);
                seeq_go(sp, hregs, sregs);
        }
}

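/*
 * Receive loop: walk descriptors starting at rx_new until we hit one the
 * DMA engine still owns.  Frames up to rx_copybreak bytes are copied
 * into a small freshly allocated skb and the original buffer is
 * recycled; larger frames are handed up as-is and the descriptor gets a
 * new buffer.  The Seeq appends a status byte after the frame data,
 * which is what the SEEQ_RSTAT_* checks below look at.  Finally the
 * HPCDMA_EOR "end of ring" mark is moved from the old last descriptor
 * to the one just before rx_new.
 */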
static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
                              struct hpc3_ethregs *hregs,
                              struct sgiseeq_regs *sregs)
{
        struct sgiseeq_rx_desc *rd;
        struct sk_buff *skb = NULL;
        struct sk_buff *newskb;
        unsigned char pkt_status;
        int len = 0;
        unsigned int orig_end = PREV_RX(sp->rx_new);

        /* Service every received packet. */
        rd = &sp->rx_desc[sp->rx_new];
        dma_sync_desc_cpu(dev, rd);
        while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
                len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
                dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
                                 PKT_BUF_SZ, DMA_FROM_DEVICE);
                pkt_status = rd->skb->data[len];
                if (pkt_status & SEEQ_RSTAT_FIG) {
                        /* Packet is OK. */
                        /* We don't want to receive our own packets */
                        if (!ether_addr_equal(rd->skb->data + 6, dev->dev_addr)) {
                                if (len > rx_copybreak) {
                                        skb = rd->skb;
                                        newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
                                        if (!newskb) {
                                                newskb = skb;
                                                skb = NULL;
                                                goto memory_squeeze;
                                        }
                                        skb_reserve(newskb, 2);
                                } else {
                                        skb = netdev_alloc_skb_ip_align(dev, len);
                                        if (skb)
                                                skb_copy_to_linear_data(skb, rd->skb->data, len);

                                        newskb = rd->skb;
                                }
memory_squeeze:
                                if (skb) {
                                        skb_put(skb, len);
                                        skb->protocol = eth_type_trans(skb, dev);
                                        netif_rx(skb);
                                        dev->stats.rx_packets++;
                                        dev->stats.rx_bytes += len;
                                } else {
                                        dev->stats.rx_dropped++;
                                }
                        } else {
                                /* Silently drop my own packets */
                                newskb = rd->skb;
                        }
                } else {
                        record_rx_errors(dev, pkt_status);
                        newskb = rd->skb;
                }
                rd->skb = newskb;
                rd->rdma.pbuf = dma_map_single(dev->dev.parent,
                                               newskb->data - 2,
                                               PKT_BUF_SZ, DMA_FROM_DEVICE);

                /* Return the entry to the ring pool. */
                rd->rdma.cntinfo = RCNTINFO_INIT;
                sp->rx_new = NEXT_RX(sp->rx_new);
                dma_sync_desc_dev(dev, rd);
                rd = &sp->rx_desc[sp->rx_new];
                dma_sync_desc_cpu(dev, rd);
        }
        dma_sync_desc_dev(dev, rd);

        dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
        sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
        dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
        dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
        sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
        dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
        rx_maybe_restart(sp, hregs, sregs);
}

static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp,
                                             struct sgiseeq_regs *sregs)
{
        if (sp->is_edlc) {
                sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT);
                sregs->rw.wregs.control = sp->control;
        }
}

static inline void kick_tx(struct net_device *dev,
                           struct sgiseeq_private *sp,
                           struct hpc3_ethregs *hregs)
{
        struct sgiseeq_tx_desc *td;
        int i = sp->tx_old;

        /* If the HPC isn't doing anything and there are more packets
         * with ETXD cleared and XIU set, we must make very certain
         * that we restart the HPC, else we risk locking up the
         * adapter.  The following code is only safe iff the HPC DMA
         * is not active!
         */
        td = &sp->tx_desc[i];
        dma_sync_desc_cpu(dev, td);
        while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
               (HPCDMA_XIU | HPCDMA_ETXD)) {
                i = NEXT_TX(i);
                td = &sp->tx_desc[i];
                dma_sync_desc_cpu(dev, td);
        }
        if (td->tdma.cntinfo & HPCDMA_XIU) {
                dma_sync_desc_dev(dev, td);
                hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
                hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
        }
}

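/*
 * TX completion: walk the ring from tx_old towards tx_new, freeing skbs
 * for descriptors the hardware has finished with.  A descriptor with XIU
 * set but ETXD still clear means the HPC stopped mid-chain; if the DMA
 * channel is idle at that point, it is restarted from that descriptor.
 */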
static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp,
                              struct hpc3_ethregs *hregs,
                              struct sgiseeq_regs *sregs)
{
        struct sgiseeq_tx_desc *td;
        unsigned long status = hregs->tx_ctrl;
        int j;

        tx_maybe_reset_collisions(sp, sregs);

        if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) {
                /* Oops, HPC detected some sort of error. */
                if (status & SEEQ_TSTAT_R16)
                        dev->stats.tx_aborted_errors++;
                if (status & SEEQ_TSTAT_UFLOW)
                        dev->stats.tx_fifo_errors++;
                if (status & SEEQ_TSTAT_LCLS)
                        dev->stats.collisions++;
        }

        /* Ack 'em... */
        for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
                td = &sp->tx_desc[j];

                dma_sync_desc_cpu(dev, td);
                if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
                        break;
                if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
                        dma_sync_desc_dev(dev, td);
                        if (!(status & HPC3_ETXCTRL_ACTIVE)) {
                                hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
                                hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
                        }
                        break;
                }
                dev->stats.tx_packets++;
                sp->tx_old = NEXT_TX(sp->tx_old);
                td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE);
                td->tdma.cntinfo |= HPCDMA_EOX;
                if (td->skb) {
                        dev_kfree_skb_any(td->skb);
                        td->skb = NULL;
                }
                dma_sync_desc_dev(dev, td);
        }
}

static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct hpc3_ethregs *hregs = sp->hregs;
        struct sgiseeq_regs *sregs = sp->sregs;

        spin_lock(&sp->tx_lock);

        /* Ack the IRQ and set software state. */
        hregs->reset = HPC3_ERST_CLRIRQ;

        /* Always check for received packets. */
        sgiseeq_rx(dev, sp, hregs, sregs);

        /* Only check for tx acks if we have something queued. */
        if (sp->tx_old != sp->tx_new)
                sgiseeq_tx(dev, sp, hregs, sregs);

        if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) {
                netif_wake_queue(dev);
        }
        spin_unlock(&sp->tx_lock);

        return IRQ_HANDLED;
}

static int sgiseeq_open(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sgiseeq_regs *sregs = sp->sregs;
        unsigned int irq = dev->irq;
        int err;

        if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
                printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
                return -EAGAIN;
        }

        err = init_seeq(dev, sp, sregs);
        if (err)
                goto out_free_irq;

        netif_start_queue(dev);

        return 0;

out_free_irq:
        free_irq(irq, dev);

        return err;
}

static int sgiseeq_close(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sgiseeq_regs *sregs = sp->sregs;
        unsigned int irq = dev->irq;

        netif_stop_queue(dev);

        /* Shutdown the Seeq. */
        reset_hpc3_and_seeq(sp->hregs, sregs);
        free_irq(irq, dev);
        seeq_purge_ring(dev);

        return 0;
}

static inline int sgiseeq_reset(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sgiseeq_regs *sregs = sp->sregs;
        int err;

        err = init_seeq(dev, sp, sregs);
        if (err)
                return err;

        netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue(dev);

        return 0;
}

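/*
 * Transmit path.  Frames shorter than ETH_ZLEN are padded in software
 * with skb_padto() before being mapped, since the descriptor's byte
 * count is taken from the padded length.  The descriptor-chaining rules
 * that keep this safe against a concurrently running DMA engine are
 * spelled out in the comment inside the function.
 */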
static netdev_tx_t
sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct hpc3_ethregs *hregs = sp->hregs;
        unsigned long flags;
        struct sgiseeq_tx_desc *td;
        int len, entry;

        spin_lock_irqsave(&sp->tx_lock, flags);

        /* Setup... */
        len = skb->len;
        if (len < ETH_ZLEN) {
                if (skb_padto(skb, ETH_ZLEN)) {
                        spin_unlock_irqrestore(&sp->tx_lock, flags);
                        return NETDEV_TX_OK;
                }
                len = ETH_ZLEN;
        }

        dev->stats.tx_bytes += len;
        entry = sp->tx_new;
        td = &sp->tx_desc[entry];
        dma_sync_desc_cpu(dev, td);

        /* Create entry.  There are so many races with adding a new
         * descriptor to the chain:
         * 1) Assume that the HPC is off processing a DMA chain while
         *    we are changing all of the following.
         * 2) Do not allow the HPC to look at a new descriptor until
         *    we have completely set up its state.  This means, do
         *    not clear HPCDMA_EOX in the current last descriptor
         *    until the one we are adding looks consistent and could
         *    be processed right now.
         * 3) The tx interrupt code must notice when we've added a new
         *    entry and the HPC got to the end of the chain before we
         *    added this new entry and restarted it.
         */
        td->skb = skb;
        td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data,
                                       len, DMA_TO_DEVICE);
        td->tdma.cntinfo = (len & HPCDMA_BCNT) |
                           HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
        dma_sync_desc_dev(dev, td);
        if (sp->tx_old != sp->tx_new) {
                struct sgiseeq_tx_desc *backend;

                backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
                dma_sync_desc_cpu(dev, backend);
                backend->tdma.cntinfo &= ~HPCDMA_EOX;
                dma_sync_desc_dev(dev, backend);
        }
        sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */

        /* Maybe kick the HPC back into motion. */
        if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
                kick_tx(dev, sp, hregs);

        if (!TX_BUFFS_AVAIL(sp))
                netif_stop_queue(dev);
        spin_unlock_irqrestore(&sp->tx_lock, flags);

        return NETDEV_TX_OK;
}

static void timeout(struct net_device *dev, unsigned int txqueue)
{
        printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
        sgiseeq_reset(dev);

        netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue(dev);
}

static void sgiseeq_set_multicast(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        unsigned char oldmode = sp->mode;

        if (dev->flags & IFF_PROMISC)
                sp->mode = SEEQ_RCMD_RANY;
        else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
                sp->mode = SEEQ_RCMD_RBMCAST;
        else
                sp->mode = SEEQ_RCMD_RBCAST;

        /* XXX I know this sucks, but is there a better way to reprogram
         * XXX the receiver?  At least, this shouldn't happen too often.
         */

        if (oldmode != sp->mode)
                sgiseeq_reset(dev);
}

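/*
 * Link the descriptors of each ring into a circular chain: every
 * descriptor's pnext holds the bus address (via VIRT_TO_DMA) of the
 * next descriptor, with the last one pointing back at the first.
 * seeq_init_ring() later fills in the actual buffer addresses.
 */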
static inline void setup_tx_ring(struct net_device *dev,
                                 struct sgiseeq_tx_desc *buf,
                                 int nbufs)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        int i = 0;

        while (i < (nbufs - 1)) {
                buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
                buf[i].tdma.pbuf = 0;
                dma_sync_desc_dev(dev, &buf[i]);
                i++;
        }
        buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf);
        dma_sync_desc_dev(dev, &buf[i]);
}

static inline void setup_rx_ring(struct net_device *dev,
                                 struct sgiseeq_rx_desc *buf,
                                 int nbufs)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        int i = 0;

        while (i < (nbufs - 1)) {
                buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
                buf[i].rdma.pbuf = 0;
                dma_sync_desc_dev(dev, &buf[i]);
                i++;
        }
        buf[i].rdma.pbuf = 0;
        buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
        dma_sync_desc_dev(dev, &buf[i]);
}

static const struct net_device_ops sgiseeq_netdev_ops = {
        .ndo_open               = sgiseeq_open,
        .ndo_stop               = sgiseeq_close,
        .ndo_start_xmit         = sgiseeq_start_xmit,
        .ndo_tx_timeout         = timeout,
        .ndo_set_rx_mode        = sgiseeq_set_multicast,
        .ndo_set_mac_address    = sgiseeq_set_mac_address,
        .ndo_validate_addr      = eth_validate_addr,
};

static int sgiseeq_probe(struct platform_device *pdev)
{
        struct sgiseeq_platform_data *pd = dev_get_platdata(&pdev->dev);
        struct hpc3_regs *hpcregs = pd->hpc;
        struct sgiseeq_init_block *sr;
        unsigned int irq = pd->irq;
        struct sgiseeq_private *sp;
        struct net_device *dev;
        int err;

        dev = alloc_etherdev(sizeof(struct sgiseeq_private));
        if (!dev) {
                err = -ENOMEM;
                goto err_out;
        }

        platform_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
        sp = netdev_priv(dev);

        /* Make private data page aligned */
        sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings),
                                   &sp->srings_dma, DMA_BIDIRECTIONAL, GFP_KERNEL);
        if (!sr) {
                printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
                err = -ENOMEM;
                goto err_out_free_dev;
        }
        sp->srings = sr;
        sp->rx_desc = sp->srings->rxvector;
        sp->tx_desc = sp->srings->txvector;
        spin_lock_init(&sp->tx_lock);

        /* A couple of calculations now save many cycles later. */
        setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS);
        setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS);

        memcpy(dev->dev_addr, pd->mac, ETH_ALEN);

#ifdef DEBUG
        gpriv = sp;
        gdev = dev;
#endif
        sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0];
        sp->hregs = &hpcregs->ethregs;
        sp->name = sgiseeqstr;
        sp->mode = SEEQ_RCMD_RBCAST;

        /* Setup PIO and DMA transfer timing */
        sp->hregs->pconfig = 0x161;
        sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
                             HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026;

        /* Reset the chip. */
        hpc3_eth_reset(sp->hregs);

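        /*
         * Decide whether this part has the extended (EDLC) register set.
         * When is_edlc is set, init_seeq() also programs the control and
         * frame-gap registers.  The test below reads the TX collision
         * counter right after reset and treats a zero low byte as
         * "extended registers present"; the hardware rationale for that
         * test is not documented here.
         */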
        sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff);
        if (sp->is_edlc)
                sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT |
                              SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
                              SEEQ_CTRL_ENCARR;

        dev->netdev_ops = &sgiseeq_netdev_ops;
        dev->watchdog_timeo = (200 * HZ) / 1000;
        dev->irq = irq;

        if (register_netdev(dev)) {
                printk(KERN_ERR "Sgiseeq: Cannot register net device, "
                       "aborting.\n");
                err = -ENODEV;
                goto err_out_free_attrs;
        }

        printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);

        return 0;

err_out_free_attrs:
        dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
                             sp->srings_dma, DMA_BIDIRECTIONAL);
err_out_free_dev:
        free_netdev(dev);

err_out:
        return err;
}

static int sgiseeq_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);
        struct sgiseeq_private *sp = netdev_priv(dev);

        unregister_netdev(dev);
        dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
                             sp->srings_dma, DMA_BIDIRECTIONAL);
        free_netdev(dev);

        return 0;
}

static struct platform_driver sgiseeq_driver = {
        .probe  = sgiseeq_probe,
        .remove = sgiseeq_remove,
        .driver = {
                .name   = "sgiseeq",
        }
};

module_platform_driver(sgiseeq_driver);

MODULE_DESCRIPTION("SGI Seeq 8003 driver");
MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sgiseeq");