/* atarilance.c: Ethernet driver for VME Lance cards on the Atari */
/*
	Written 1995/96 by Roman Hodek (Roman.Hodek@informatik.uni-erlangen.de)

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	This driver was written with the following sources of reference:
	 - The driver for the Riebl Lance card by the TU Vienna.
	 - The modified TUW driver for PAM's VME cards
	 - The PC-Linux driver for Lance cards (but this is for bus master
	   cards, not the shared memory ones)
	 - The Amiga Ariadne driver

	v1.0: (in 1.2.13pl4/0.9.13)
	      Initial version
	v1.1: (in 1.2.13pl5)
	      more comments
	      deleted some debugging stuff
	      optimized register access (keep AREG pointing to CSR0)
	      following AMD, CSR0_STRT should be set only after IDON is detected
	      use memcpy() for data transfers, that also employs long word moves
	      better probe procedure for 24-bit systems
	      non-VME-RieblCards need extra delays in memcpy
	      must also do write test, since 0xfxe00000 may hit ROM
	      use 8/32 tx/rx buffers, which should give better NFS performance;
	        this is made possible by shifting the last packet buffer after the
	        RieblCard reserved area
	v1.2: (in 1.2.13pl8)
	      again fixed probing for the Falcon; 0xfe01000 hits phys. 0x00010000
	      and thus RAM, in case of no Lance found all memory contents have to
	      be restored!
	      Now possible to compile as module.
	v1.3: 03/30/96 Jes Sorensen, Roman (in 1.3)
	      Several little 1.3 adaptations
	      When the lance is stopped it jumps back into little-endian
	      mode. It is therefore necessary to put it back where it
	      belongs, in big endian mode, in order to make things work.
	      This might be the reason why multicast-mode didn't work
	      before, but I'm not able to test it as I only got an Amiga
	      (we had similar problems with the A2065 driver).

*/

static const char version[] = "atarilance.c: v1.3 04/04/96 "
			      "Roman.Hodek@informatik.uni-erlangen.de\n";

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/bitops.h>

#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/atarihw.h>
#include <asm/atariints.h>
#include <asm/io.h>

/* Debug level:
 *  0 = silent, print only serious errors
 *  1 = normal, print error messages
 *  2 = debug, print debug infos
 *  3 = debug, print even more debug infos (packet data)
 */

#define	LANCE_DEBUG	1

#ifdef LANCE_DEBUG
static int lance_debug = LANCE_DEBUG;
#else
static int lance_debug = 1;
#endif
module_param(lance_debug, int, 0);
MODULE_PARM_DESC(lance_debug, "atarilance debug level (0-3)");
MODULE_LICENSE("GPL");

/* Print debug messages on probing? */
#undef	LANCE_DEBUG_PROBE

#define	DPRINTK(n,a)				\
	do {					\
		if (lance_debug >= n)		\
			printk a;		\
	} while( 0 )
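/* Call sites pass the printk arguments as one parenthesized group, e.g.
 * (taken from lance_open() below):
 *
 *	DPRINTK( 2, ( "%s: lance_open()\n", dev->name ));
 *
 * The inner list is substituted for 'a' and handed to printk() as a whole.
 */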

#ifdef LANCE_DEBUG_PROBE
# define PROBE_PRINT(a)	printk a
#else
# define PROBE_PRINT(a)
#endif

/* These define the number of Rx and Tx buffers as log2. (Only powers
 * of two are valid)
 * Many more rx buffers (32) are reserved than tx buffers (8), since receiving
 * is more time-critical than sending and packets may have to remain in the
 * board's memory when main memory is low.
 */

#define TX_LOG_RING_SIZE	3
#define RX_LOG_RING_SIZE	5

/* These are the derived values */

#define TX_RING_SIZE		(1 << TX_LOG_RING_SIZE)
#define TX_RING_LEN_BITS	(TX_LOG_RING_SIZE << 5)
#define TX_RING_MOD_MASK	(TX_RING_SIZE - 1)

#define RX_RING_SIZE		(1 << RX_LOG_RING_SIZE)
#define RX_RING_LEN_BITS	(RX_LOG_RING_SIZE << 5)
#define RX_RING_MOD_MASK	(RX_RING_SIZE - 1)

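/* For the sizes chosen above this gives 8 Tx and 32 Rx buffers;
 * TX_RING_LEN_BITS = 3 << 5 = 0x60 and RX_RING_LEN_BITS = 5 << 5 = 0xa0.
 * These values go into the 'len' byte of the ring descriptors in the init
 * block (struct ringdesc below), which carries the log2 ring length in its
 * upper bits. (Worked numbers for illustration only; they follow directly
 * from the macros above.)
 */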
#define TX_TIMEOUT	(HZ/5)

/* The LANCE Rx and Tx ring descriptors. */
struct lance_rx_head {
	unsigned short		base;		/* Low word of base addr */
	volatile unsigned char	flag;
	unsigned char		base_hi;	/* High byte of base addr (unused) */
	short			buf_length;	/* This length is 2s complement! */
	volatile short		msg_length;	/* This length is "normal". */
};

struct lance_tx_head {
	unsigned short		base;		/* Low word of base addr */
	volatile unsigned char	flag;
	unsigned char		base_hi;	/* High byte of base addr (unused) */
	short			length;		/* Length is 2s complement! */
	volatile short		misc;
};

struct ringdesc {
	unsigned short	adr_lo;		/* Low 16 bits of address */
	unsigned char	len;		/* Length bits */
	unsigned char	adr_hi;		/* High 8 bits of address (unused) */
};

/* The LANCE initialization block, described in databook. */
struct lance_init_block {
	unsigned short	mode;		/* Pre-set mode */
	unsigned char	hwaddr[6];	/* Physical ethernet address */
	unsigned	filter[2];	/* Multicast filter (unused). */
	/* Receive and transmit ring base, along with length bits. */
	struct ringdesc	rx_ring;
	struct ringdesc	tx_ring;
};

/* The whole layout of the Lance shared memory */
struct lance_memory {
	struct lance_init_block	init;
	struct lance_tx_head	tx_head[TX_RING_SIZE];
	struct lance_rx_head	rx_head[RX_RING_SIZE];
	char			packet_area[];	/* packet buffers follow the
						 * init block and the ring
						 * descriptors; their offsets
						 * are assigned at runtime */
};

/* RieblCard specifics:
 * The original TOS driver for these cards reserves the area from offset
 * 0xee70 to 0xeebb for storing configuration data. Of interest to us is the
 * Ethernet address there, and the magic for verifying the data's validity.
 * The reserved area isn't touched by packet buffers. Furthermore, offset
 * 0xfffe is reserved for the interrupt vector number.
 */
#define	RIEBL_RSVD_START	0xee70
#define	RIEBL_RSVD_END		0xeec0
#define RIEBL_MAGIC		0x09051990
#define RIEBL_MAGIC_ADDR	((unsigned long *)(((char *)MEM) + 0xee8a))
#define RIEBL_HWADDR_ADDR	((unsigned char *)(((char *)MEM) + 0xee8e))
#define RIEBL_IVEC_ADDR		((unsigned short *)(((char *)MEM) + 0xfffe))

/* This is a default address for the old RieblCards without a battery
 * that have no ethernet address at boot time. 00:00:36:04 is the
 * prefix for Riebl cards, the 00:00 at the end is arbitrary.
 */

static unsigned char OldRieblDefHwaddr[6] = {
	0x00, 0x00, 0x36, 0x04, 0x00, 0x00
};


/* I/O registers of the Lance chip */

struct lance_ioreg {
/* base+0x0 */	volatile unsigned short	data;
/* base+0x2 */	volatile unsigned short	addr;
		unsigned char		_dummy1[3];
/* base+0x7 */	volatile unsigned char	ivec;
		unsigned char		_dummy2[5];
/* base+0xd */	volatile unsigned char	eeprom;
		unsigned char		_dummy3;
/* base+0xf */	volatile unsigned char	mem;
};

/* Types of boards this driver supports */

enum lance_type {
	OLD_RIEBL,	/* old Riebl card without battery */
	NEW_RIEBL,	/* new Riebl card with battery */
	PAM_CARD	/* PAM card with EEPROM */
};

static char *lance_names[] = {
	"Riebl-Card (without battery)",
	"Riebl-Card (with battery)",
	"PAM intern card"
};

/* The driver's private device structure */

struct lance_private {
	enum lance_type		cardtype;
	struct lance_ioreg	*iobase;
	struct lance_memory	*mem;
	int			cur_rx, cur_tx;	/* The next free ring entry */
	int			dirty_tx;	/* Ring entries to be freed. */
	/* copy function */
	void			*(*memcpy_f)( void *, const void *, size_t );
	/* This must be long for set_bit() */
	long			tx_full;
	spinlock_t		devlock;
};

/* I/O register access macros */

#define	MEM	lp->mem
#define	DREG	IO->data
#define	AREG	IO->addr
#define	REGA(a)	(*( AREG = (a), &DREG ))

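/* Usage sketch (illustrative, see e.g. lance_probe1() and lance_open()
 * below):
 *
 *	REGA( CSR0 ) = CSR0_STOP;
 *
 * writes the register number to the address port (AREG/RAP) and the value
 * to the data port (DREG/RDP). While AREG still points at CSR0, further
 * accesses can use DREG directly (DREG = CSR0_INEA;) -- the "keep AREG
 * pointing to CSR0" optimization mentioned in the v1.1 changelog above.
 */
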
/* Definitions for packet buffer access: */
#define PKT_BUF_SZ	1544
/* Get the address of a packet buffer corresponding to a given buffer head */
#define PKTBUF_ADDR(head)	(((unsigned char *)(MEM)) + (head)->base)

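/* Illustrative only: copying an outgoing frame into board memory looks
 * roughly like
 *
 *	lp->memcpy_f( PKTBUF_ADDR(head), skb->data, len );
 *
 * where memcpy_f is plain memcpy() for the fast (VME) cards and
 * slow_memcpy() -- with MFPDELAY() between byte moves -- for the slow
 * ST-RieblCard variants (see lance_probe1() and slow_memcpy() below).
 */
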
/* Possible memory/IO addresses for probing */

static struct lance_addr {
	unsigned long	memaddr;
	unsigned long	ioaddr;
	int		slow_flag;
} lance_addr_list[] = {
	{ 0xfe010000, 0xfe00fff0, 0 },	/* RieblCard VME in TT */
	{ 0xffc10000, 0xffc0fff0, 0 },	/* RieblCard VME in MegaSTE
					   (highest byte stripped) */
	{ 0xffe00000, 0xffff7000, 1 },	/* RieblCard in ST
					   (highest byte stripped) */
	{ 0xffd00000, 0xffff7000, 1 },	/* RieblCard in ST with hw modif. to
					   avoid conflict with ROM
					   (highest byte stripped) */
	{ 0xffcf0000, 0xffcffff0, 0 },	/* PAMCard VME in TT and MSTE
					   (highest byte stripped) */
	{ 0xfecf0000, 0xfecffff0, 0 },	/* Rhotron's PAMCard VME in TT and MSTE
					   (highest byte stripped) */
};

#define N_LANCE_ADDR	ARRAY_SIZE(lance_addr_list)


/* Definitions for the Lance */

/* tx_head flags */
#define TMD1_ENP	0x01	/* end of packet */
#define TMD1_STP	0x02	/* start of packet */
#define TMD1_DEF	0x04	/* deferred */
#define TMD1_ONE	0x08	/* one retry needed */
#define TMD1_MORE	0x10	/* more than one retry needed */
#define TMD1_ERR	0x40	/* error summary */
#define TMD1_OWN	0x80	/* ownership (set: chip owns) */

#define TMD1_OWN_CHIP	TMD1_OWN
#define TMD1_OWN_HOST	0

/* tx_head misc field */
#define TMD3_TDR	0x03FF	/* Time Domain Reflectometry counter */
#define TMD3_RTRY	0x0400	/* failed after 16 retries */
#define TMD3_LCAR	0x0800	/* carrier lost */
#define TMD3_LCOL	0x1000	/* late collision */
#define TMD3_UFLO	0x4000	/* underflow (late memory) */
#define TMD3_BUFF	0x8000	/* buffering error (no ENP) */

/* rx_head flags */
#define RMD1_ENP	0x01	/* end of packet */
#define RMD1_STP	0x02	/* start of packet */
#define RMD1_BUFF	0x04	/* buffer error */
#define RMD1_CRC	0x08	/* CRC error */
#define RMD1_OFLO	0x10	/* overflow */
#define RMD1_FRAM	0x20	/* framing error */
#define RMD1_ERR	0x40	/* error summary */
#define RMD1_OWN	0x80	/* ownership (set: chip owns) */

#define RMD1_OWN_CHIP	RMD1_OWN
#define RMD1_OWN_HOST	0

/* register names */
#define CSR0	0	/* mode/status */
#define CSR1	1	/* init block addr (low) */
#define CSR2	2	/* init block addr (high) */
#define CSR3	3	/* misc */
#define CSR8	8	/* address filter */
#define CSR15	15	/* promiscuous mode */

/* CSR0 */
/* (R=readable, W=writeable, S=set on write, C=clear on write) */
#define CSR0_INIT	0x0001		/* initialize (RS) */
#define CSR0_STRT	0x0002		/* start (RS) */
#define CSR0_STOP	0x0004		/* stop (RS) */
#define CSR0_TDMD	0x0008		/* transmit demand (RS) */
#define CSR0_TXON	0x0010		/* transmitter on (R) */
#define CSR0_RXON	0x0020		/* receiver on (R) */
#define CSR0_INEA	0x0040		/* interrupt enable (RW) */
#define CSR0_INTR	0x0080		/* interrupt active (R) */
#define CSR0_IDON	0x0100		/* initialization done (RC) */
#define CSR0_TINT	0x0200		/* transmitter interrupt (RC) */
#define CSR0_RINT	0x0400		/* receiver interrupt (RC) */
#define CSR0_MERR	0x0800		/* memory error (RC) */
#define CSR0_MISS	0x1000		/* missed frame (RC) */
#define CSR0_CERR	0x2000		/* carrier error (no heartbeat :-) (RC) */
#define CSR0_BABL	0x4000		/* babble: tx-ed too many bits (RC) */
#define CSR0_ERR	0x8000		/* error (RC) */

/* CSR3 */
#define CSR3_BCON	0x0001		/* byte control */
#define CSR3_ACON	0x0002		/* ALE control */
#define CSR3_BSWP	0x0004		/* byte swap (1=big endian) */
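
/* Note: a CSR0_STOP puts the chip back into little-endian mode, so CSR3 is
 * rewritten with CSR3_BSWP after every stop, e.g. (as done in lance_open()
 * and lance_tx_timeout() below):
 *
 *	REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
 */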


/***************************** Prototypes *****************************/

static unsigned long lance_probe1( struct net_device *dev, struct lance_addr
                                   *init_rec );
static int lance_open( struct net_device *dev );
static void lance_init_ring( struct net_device *dev );
static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
				    struct net_device *dev);
static irqreturn_t lance_interrupt( int irq, void *dev_id );
static int lance_rx( struct net_device *dev );
static int lance_close( struct net_device *dev );
static void set_multicast_list( struct net_device *dev );
static int lance_set_mac_address( struct net_device *dev, void *addr );
static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue);

/************************* End of Prototypes **************************/



static void *slow_memcpy( void *dst, const void *src, size_t len )

{	char *cto = dst;
	const char *cfrom = src;

	while( len-- ) {
		*cto++ = *cfrom++;
		MFPDELAY();
	}
	return dst;
}


struct net_device * __init atarilance_probe(int unit)
{
	int i;
	static int found;
	struct net_device *dev;
	int err = -ENODEV;

	if (!MACH_IS_ATARI || found)
		/* Assume there's only one board possible... That seems true, since
		 * the Riebl/PAM board's address cannot be changed. */
		return ERR_PTR(-ENODEV);

	dev = alloc_etherdev(sizeof(struct lance_private));
	if (!dev)
		return ERR_PTR(-ENOMEM);
	if (unit >= 0) {
		sprintf(dev->name, "eth%d", unit);
		netdev_boot_setup_check(dev);
	}

	for( i = 0; i < N_LANCE_ADDR; ++i ) {
		if (lance_probe1( dev, &lance_addr_list[i] )) {
			found = 1;
			err = register_netdev(dev);
			if (!err)
				return dev;
			free_irq(dev->irq, dev);
			break;
		}
	}
	free_netdev(dev);
	return ERR_PTR(err);
}


/* Derived from hwreg_present() in atari/config.c: */

static noinline int __init addr_accessible(volatile void *regp, int wordflag,
					   int writeflag)
{
	int		ret;
	unsigned long	flags;
	long		*vbr, save_berr;

	local_irq_save(flags);

	__asm__ __volatile__ ( "movec	%/vbr,%0" : "=r" (vbr) : );
	save_berr = vbr[2];

	__asm__ __volatile__
	(	"movel	%/sp,%/d1\n\t"
		"movel	#Lberr,%2@\n\t"
		"moveq	#0,%0\n\t"
		"tstl	%3\n\t"
		"bne	1f\n\t"
		"moveb	%1@,%/d0\n\t"
		"nop	\n\t"
		"bra	2f\n"
		"1:	movew	%1@,%/d0\n\t"
		"nop	\n"
		"2:	tstl	%4\n\t"
		"beq	2f\n\t"
		"tstl	%3\n\t"
		"bne	1f\n\t"
		"clrb	%1@\n\t"
		"nop	\n\t"
		"moveb	%/d0,%1@\n\t"
		"nop	\n\t"
		"bra	2f\n"
		"1:	clrw	%1@\n\t"
		"nop	\n\t"
		"movew	%/d0,%1@\n\t"
		"nop	\n"
		"2:	moveq	#1,%0\n"
		"Lberr:	movel	%/d1,%/sp"
		: "=&d" (ret)
		: "a" (regp), "a" (&vbr[2]), "rm" (wordflag), "rm" (writeflag)
		: "d0", "d1", "memory"
	);

	vbr[2] = save_berr;
	local_irq_restore(flags);

	return ret;
}
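
/* Examples of how this is used in lance_probe1() below:
 *   addr_accessible( memaddr, 1, 1 )        - word-wide read and write-back test
 *   addr_accessible( &(IO->eeprom), 0, 0 )  - byte read only (PAM EEPROM check)
 * wordflag selects byte vs. word access, writeflag additionally writes the
 * read value back; a bus error is caught via the temporary vector and makes
 * the function return 0.
 */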

static const struct net_device_ops lance_netdev_ops = {
	.ndo_open		= lance_open,
	.ndo_stop		= lance_close,
	.ndo_start_xmit		= lance_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_set_mac_address	= lance_set_mac_address,
	.ndo_tx_timeout		= lance_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
};

static unsigned long __init lance_probe1( struct net_device *dev,
					  struct lance_addr *init_rec )
{
	volatile unsigned short *memaddr =
		(volatile unsigned short *)init_rec->memaddr;
	volatile unsigned short *ioaddr =
		(volatile unsigned short *)init_rec->ioaddr;
	struct lance_private	*lp;
	struct lance_ioreg	*IO;
	int			i;
	static int		did_version;
	unsigned short		save1, save2;

	PROBE_PRINT(( "Probing for Lance card at mem %#lx io %#lx\n",
		      (long)memaddr, (long)ioaddr ));

	/* Test whether memory readable and writable */
	PROBE_PRINT(( "lance_probe1: testing memory to be accessible\n" ));
	if (!addr_accessible( memaddr, 1, 1 )) goto probe_fail;

	/* Written values should come back... */
	PROBE_PRINT(( "lance_probe1: testing memory to be writable (1)\n" ));
	save1 = *memaddr;
	*memaddr = 0x0001;
	if (*memaddr != 0x0001) goto probe_fail;
	PROBE_PRINT(( "lance_probe1: testing memory to be writable (2)\n" ));
	*memaddr = 0x0000;
	if (*memaddr != 0x0000) goto probe_fail;
	*memaddr = save1;

	/* First port should be readable and writable */
	PROBE_PRINT(( "lance_probe1: testing ioport to be accessible\n" ));
	if (!addr_accessible( ioaddr, 1, 1 )) goto probe_fail;

	/* and written values should be readable */
	PROBE_PRINT(( "lance_probe1: testing ioport to be writable\n" ));
	save2 = ioaddr[1];
	ioaddr[1] = 0x0001;
	if (ioaddr[1] != 0x0001) goto probe_fail;

	/* The CSR0_INIT bit should not be readable */
	PROBE_PRINT(( "lance_probe1: testing CSR0 register function (1)\n" ));
	save1 = ioaddr[0];
	ioaddr[1] = CSR0;
	ioaddr[0] = CSR0_INIT | CSR0_STOP;
	if (ioaddr[0] != CSR0_STOP) {
		ioaddr[0] = save1;
		ioaddr[1] = save2;
		goto probe_fail;
	}
	PROBE_PRINT(( "lance_probe1: testing CSR0 register function (2)\n" ));
	ioaddr[0] = CSR0_STOP;
	if (ioaddr[0] != CSR0_STOP) {
		ioaddr[0] = save1;
		ioaddr[1] = save2;
		goto probe_fail;
	}

	/* Now ok... */
	PROBE_PRINT(( "lance_probe1: Lance card detected\n" ));
	goto probe_ok;

  probe_fail:
	return 0;

  probe_ok:
	lp = netdev_priv(dev);
	MEM = (struct lance_memory *)memaddr;
	IO = lp->iobase = (struct lance_ioreg *)ioaddr;
	dev->base_addr = (unsigned long)ioaddr; /* informational only */
	lp->memcpy_f = init_rec->slow_flag ? slow_memcpy : memcpy;

	REGA( CSR0 ) = CSR0_STOP;

	/* Now test for type: If the eeprom I/O port is readable, it is a
	 * PAM card */
	if (addr_accessible( &(IO->eeprom), 0, 0 )) {
		/* Switch back to Ram */
		i = IO->mem;
		lp->cardtype = PAM_CARD;
	}
	else if (*RIEBL_MAGIC_ADDR == RIEBL_MAGIC) {
		lp->cardtype = NEW_RIEBL;
	}
	else
		lp->cardtype = OLD_RIEBL;

	if (lp->cardtype == PAM_CARD ||
	    memaddr == (unsigned short *)0xffe00000) {
		/* PAM's card and Riebl on ST use level 5 autovector */
		if (request_irq(IRQ_AUTO_5, lance_interrupt, 0,
				"PAM,Riebl-ST Ethernet", dev)) {
			printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 );
			return 0;
		}
		dev->irq = IRQ_AUTO_5;
	}
	else {
		/* For VME-RieblCards, request a free VME int */
		unsigned int irq = atari_register_vme_int();
		if (!irq) {
			printk( "Lance: request for VME interrupt failed\n" );
			return 0;
		}
		if (request_irq(irq, lance_interrupt, 0, "Riebl-VME Ethernet",
				dev)) {
			printk( "Lance: request for irq %u failed\n", irq );
			return 0;
		}
		dev->irq = irq;
	}

	printk("%s: %s at io %#lx, mem %#lx, irq %d%s, hwaddr ",
	       dev->name, lance_names[lp->cardtype],
	       (unsigned long)ioaddr,
	       (unsigned long)memaddr,
	       dev->irq,
	       init_rec->slow_flag ? " (slow memcpy)" : "" );

	/* Get the ethernet address */
	switch( lp->cardtype ) {
	case OLD_RIEBL:
		/* No ethernet address! (Set some default address) */
		memcpy(dev->dev_addr, OldRieblDefHwaddr, ETH_ALEN);
		break;
	case NEW_RIEBL:
		lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
		break;
	case PAM_CARD:
		i = IO->eeprom;
		for( i = 0; i < 6; ++i )
			dev->dev_addr[i] =
				((((unsigned short *)MEM)[i*2] & 0x0f) << 4) |
				((((unsigned short *)MEM)[i*2+1] & 0x0f));
		i = IO->mem;
		break;
	}
	printk("%pM\n", dev->dev_addr);
	if (lp->cardtype == OLD_RIEBL) {
		printk( "%s: Warning: This is a default ethernet address!\n",
			dev->name );
		printk( " Use \"ifconfig hw ether ...\" to set the address.\n" );
	}

	spin_lock_init(&lp->devlock);

	MEM->init.mode = 0x0000;	/* Disable Rx and Tx. */
	for( i = 0; i < 6; i++ )
		MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */
	MEM->init.filter[0] = 0x00000000;
	MEM->init.filter[1] = 0x00000000;
	MEM->init.rx_ring.adr_lo = offsetof( struct lance_memory, rx_head );
	MEM->init.rx_ring.adr_hi = 0;
	MEM->init.rx_ring.len    = RX_RING_LEN_BITS;
	MEM->init.tx_ring.adr_lo = offsetof( struct lance_memory, tx_head );
	MEM->init.tx_ring.adr_hi = 0;
	MEM->init.tx_ring.len    = TX_RING_LEN_BITS;

	if (lp->cardtype == PAM_CARD)
		IO->ivec = IRQ_SOURCE_TO_VECTOR(dev->irq);
	else
		*RIEBL_IVEC_ADDR = IRQ_SOURCE_TO_VECTOR(dev->irq);

	if (did_version++ == 0)
		DPRINTK( 1, ( version ));

	dev->netdev_ops = &lance_netdev_ops;

	/* XXX MSch */
	dev->watchdog_timeo = TX_TIMEOUT;

	return 1;
}


static int lance_open( struct net_device *dev )
{
	struct lance_private *lp = netdev_priv(dev);
	struct lance_ioreg *IO = lp->iobase;
	int i;

	DPRINTK( 2, ( "%s: lance_open()\n", dev->name ));

	lance_init_ring(dev);
	/* Re-initialize the LANCE, and start it when done. */

	REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
	REGA( CSR2 ) = 0;
	REGA( CSR1 ) = 0;
	REGA( CSR0 ) = CSR0_INIT;
	/* From now on, AREG is kept to point to CSR0 */

	i = 1000000;
	while (--i > 0)
		if (DREG & CSR0_IDON)
			break;
	if (i <= 0 || (DREG & CSR0_ERR)) {
		DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
			      dev->name, i, DREG ));
		DREG = CSR0_STOP;
		return -EIO;
	}
	DREG = CSR0_IDON;
	DREG = CSR0_STRT;
	DREG = CSR0_INEA;

	netif_start_queue (dev);

	DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG ));

	return 0;
}


/* Initialize the LANCE Rx and Tx rings. */

static void lance_init_ring( struct net_device *dev )
{
	struct lance_private *lp = netdev_priv(dev);
	int i;
	unsigned offset;

	lp->tx_full = 0;
	lp->cur_rx = lp->cur_tx = 0;
	lp->dirty_tx = 0;

	offset = offsetof( struct lance_memory, packet_area );

/* If the packet buffer at offset 'o' would conflict with the reserved area
 * of RieblCards, advance it */
#define	CHECK_OFFSET(o)							\
	do {								\
		if (lp->cardtype == OLD_RIEBL || lp->cardtype == NEW_RIEBL) {	\
			if (((o) < RIEBL_RSVD_START) ? (o)+PKT_BUF_SZ > RIEBL_RSVD_START \
						     : (o) < RIEBL_RSVD_END)	\
				(o) = RIEBL_RSVD_END;			\
		}							\
	} while(0)
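/* Worked example (numbers for illustration only): with PKT_BUF_SZ = 1544
 * (0x608), a buffer that would start at offset 0xe870 ends at 0xee78,
 * which overlaps RIEBL_RSVD_START (0xee70), so CHECK_OFFSET() moves it up
 * to RIEBL_RSVD_END (0xeec0). For non-Riebl (PAM) cards the offset is
 * never adjusted.
 */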

	for( i = 0; i < TX_RING_SIZE; i++ ) {
		CHECK_OFFSET(offset);
		MEM->tx_head[i].base = offset;
		MEM->tx_head[i].flag = TMD1_OWN_HOST;
		MEM->tx_head[i].base_hi = 0;
		MEM->tx_head[i].length = 0;
		MEM->tx_head[i].misc = 0;
		offset += PKT_BUF_SZ;
	}

	for( i = 0; i < RX_RING_SIZE; i++ ) {
		CHECK_OFFSET(offset);
		MEM->rx_head[i].base = offset;
		MEM->rx_head[i].flag = TMD1_OWN_CHIP;
		MEM->rx_head[i].base_hi = 0;
		MEM->rx_head[i].buf_length = -PKT_BUF_SZ;
		MEM->rx_head[i].msg_length = 0;
		offset += PKT_BUF_SZ;
	}
}


/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */


static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
	struct lance_private *lp = netdev_priv(dev);
	struct lance_ioreg *IO = lp->iobase;

	AREG = CSR0;
	DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n",
		      dev->name, DREG ));
	DREG = CSR0_STOP;
	/*
	 * Always set BSWP after a STOP as STOP puts it back into
	 * little endian mode.
	 */
	REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
	dev->stats.tx_errors++;
#ifndef final_version
	{	int i;
		DPRINTK( 2, ( "Ring data: dirty_tx %d cur_tx %d%s cur_rx %d\n",
			      lp->dirty_tx, lp->cur_tx,
			      lp->tx_full ? " (full)" : "",
			      lp->cur_rx ));
		for( i = 0 ; i < RX_RING_SIZE; i++ )
			DPRINTK( 2, ( "rx #%d: base=%04x blen=%04x mlen=%04x\n",
				      i, MEM->rx_head[i].base,
				      -MEM->rx_head[i].buf_length,
				      MEM->rx_head[i].msg_length ));
		for( i = 0 ; i < TX_RING_SIZE; i++ )
			DPRINTK( 2, ( "tx #%d: base=%04x len=%04x misc=%04x\n",
				      i, MEM->tx_head[i].base,
				      -MEM->tx_head[i].length,
				      MEM->tx_head[i].misc ));
	}
#endif
	/* XXX MSch: maybe purge/reinit ring here */
	/* lance_restart, essentially */
	lance_init_ring(dev);
	REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);
}

/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */

static netdev_tx_t
lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	struct lance_ioreg *IO = lp->iobase;
	int entry, len;
	struct lance_tx_head *head;
	unsigned long flags;

	DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n",
		      dev->name, DREG ));

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) /* The old LANCE chips don't automatically pad buffers to the minimum size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (len < ETH_ZLEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) len = ETH_ZLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) /* The PAM card has a bug: it can only send packets with an even number of bytes! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) else if (lp->cardtype == PAM_CARD && (len & 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) ++len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (len > skb->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) if (skb_padto(skb, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) netif_stop_queue (dev);
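/* Stop the queue for now; it is re-enabled further down once we know the
 * next Tx descriptor is still free. */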
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) /* Fill in a Tx ring entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) if (lance_debug >= 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) printk( "%s: TX pkt type 0x%04x from %pM to %pM"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) " data at 0x%08x len %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) dev->name, ((u_short *)skb->data)[6],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) &skb->data[6], skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) (int)skb->data, (int)skb->len );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) /* We're not ready for the interrupt until the last flags have been set/reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)  * and the interrupt may fire as soon as OWN_CHIP is set, so take the lock now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) spin_lock_irqsave (&lp->devlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) /* Mask to ring buffer boundary. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) entry = lp->cur_tx & TX_RING_MOD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) head = &(MEM->tx_head[entry]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) /* Caution: the write order is important here, set the "ownership" bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * last.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
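/* The LANCE wants the buffer byte count as a negative (two's complement)
 * value; the packet data itself is copied into the card's shared memory. */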
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) head->length = -len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) head->misc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) dev->stats.tx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) dev_kfree_skb( skb );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) lp->cur_tx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) lp->cur_tx -= TX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) lp->dirty_tx -= TX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) /* Trigger an immediate send poll. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) DREG = CSR0_INEA | CSR0_TDMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
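/* If the next descriptor is still owned by the host there is room for at
 * least one more packet, so keep the queue running; otherwise mark the ring
 * full until the Tx interrupt has reclaimed some entries. */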
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) TMD1_OWN_HOST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) netif_start_queue (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) lp->tx_full = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) spin_unlock_irqrestore (&lp->devlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) /* The LANCE interrupt handler. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) static irqreturn_t lance_interrupt( int irq, void *dev_id )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) struct net_device *dev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) struct lance_private *lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) struct lance_ioreg *IO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) int csr0, boguscnt = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) int handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (dev == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) DPRINTK( 1, ( "lance_interrupt(): interrupt for unknown device.\n" ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) IO = lp->iobase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) spin_lock (&lp->devlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) AREG = CSR0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
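/* Service events until none of ERR/TINT/RINT is pending; boguscnt bounds
 * the loop as a safeguard against a stuck interrupt source. */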
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) while( ((csr0 = DREG) & (CSR0_ERR | CSR0_TINT | CSR0_RINT)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) --boguscnt >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) handled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /* Acknowledge all of the current interrupt sources ASAP. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) DREG = csr0 & ~(CSR0_INIT | CSR0_STRT | CSR0_STOP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) CSR0_TDMD | CSR0_INEA);
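/* CSR0 status bits are cleared by writing 1s back to them; the control bits
 * (INIT/STRT/STOP/TDMD/INEA) are masked out so no new command is issued. */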
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) DPRINTK( 2, ( "%s: interrupt csr0=%04x new csr=%04x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) dev->name, csr0, DREG ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (csr0 & CSR0_RINT) /* Rx interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) lance_rx( dev );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (csr0 & CSR0_TINT) { /* Tx-done interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) int dirty_tx = lp->dirty_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
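/* Reap completed Tx descriptors between dirty_tx and cur_tx; stop at the
 * first one still owned by the chip. */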
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) while( dirty_tx < lp->cur_tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) int entry = dirty_tx & TX_RING_MOD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) int status = MEM->tx_head[entry].flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) if (status & TMD1_OWN_CHIP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) break; /* It still hasn't been Txed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) MEM->tx_head[entry].flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (status & TMD1_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) /* There was a major error, log it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) int err_status = MEM->tx_head[entry].misc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (err_status & TMD3_RTRY) dev->stats.tx_aborted_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (err_status & TMD3_LCAR) dev->stats.tx_carrier_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (err_status & TMD3_LCOL) dev->stats.tx_window_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (err_status & TMD3_UFLO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) /* Ackk! On FIFO errors the Tx unit is turned off! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) dev->stats.tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) /* Remove this verbosity later! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) DPRINTK( 1, ( "%s: Tx FIFO error! Status %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) dev->name, csr0 ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) /* Restart the chip. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) DREG = CSR0_STRT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (status & (TMD1_MORE | TMD1_ONE | TMD1_DEF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) dev->stats.collisions++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) dev->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) /* XXX MSch: free skb?? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) dirty_tx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) #ifndef final_version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) DPRINTK( 0, ( "out-of-sync dirty pointer,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) " %d vs. %d, full=%ld.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) dirty_tx, lp->cur_tx, lp->tx_full ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) dirty_tx += TX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (lp->tx_full && (netif_queue_stopped(dev)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) /* The ring is no longer full; wake the transmit queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) lp->tx_full = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) netif_wake_queue (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) lp->dirty_tx = dirty_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) /* Log misc errors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (csr0 & CSR0_MERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) "status %04x.\n", dev->name, csr0 ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) /* Restart the chip. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) DREG = CSR0_STRT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) /* Clear any other interrupt, and set interrupt enable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) DREG = CSR0_BABL | CSR0_CERR | CSR0_MISS | CSR0_MERR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) CSR0_IDON | CSR0_INEA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) DPRINTK( 2, ( "%s: exiting interrupt, csr0=%#04x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) dev->name, DREG ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) spin_unlock (&lp->devlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) return IRQ_RETVAL(handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
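/* Receive handler: called from the interrupt handler when RINT is set;
 * drains all host-owned descriptors from the Rx ring. */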
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) static int lance_rx( struct net_device *dev )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) struct lance_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) int entry = lp->cur_rx & RX_RING_MOD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) DPRINTK( 2, ( "%s: rx int, flag=%04x\n", dev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) MEM->rx_head[entry].flag ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) /* If we own the next entry, it's a new packet. Send it up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) while( (MEM->rx_head[entry].flag & RMD1_OWN) == RMD1_OWN_HOST ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct lance_rx_head *head = &(MEM->rx_head[entry]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) int status = head->flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (status != (RMD1_ENP|RMD1_STP)) { /* There was an error. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) /* There is a tricky error noted by John Murphy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) <murf@perftech.com> to Russ Nelson: Even with full-sized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) buffers it's possible for a jabber packet to use two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) buffers, with only the last correctly noting the error. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (status & RMD1_ENP) /* Only count a general error at the */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) dev->stats.rx_errors++; /* end of a packet.*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (status & RMD1_FRAM) dev->stats.rx_frame_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (status & RMD1_OFLO) dev->stats.rx_over_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (status & RMD1_CRC) dev->stats.rx_crc_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) head->flag &= (RMD1_ENP|RMD1_STP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) /* Got a good packet: allocate an sk_buff and pass it up the stack. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) short pkt_len = head->msg_length & 0xfff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (pkt_len < 60) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) printk( "%s: Runt packet!\n", dev->name );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) skb = netdev_alloc_skb(dev, pkt_len + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (skb == NULL) {
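/* Allocation failed: if the chip still owns other Rx descriptors it can keep
 * receiving, so leave this one for the next pass; if (almost) the whole ring
 * is owned by the host, drop the frame and give the descriptor back so
 * reception does not stall. Either way, stop and retry on the next interrupt. */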
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) for( i = 0; i < RX_RING_SIZE; i++ )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) RMD1_OWN_CHIP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) if (i > RX_RING_SIZE - 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) dev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) head->flag |= RMD1_OWN_CHIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) lp->cur_rx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (lance_debug >= 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) u_char *data = PKTBUF_ADDR(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) printk(KERN_DEBUG "%s: RX pkt type 0x%04x from %pM to %pM "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) "data %8ph len %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) dev->name, ((u_short *)data)[6],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) &data[6], data, &data[15], pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) skb_reserve( skb, 2 ); /* 16 byte align */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) skb_put( skb, pkt_len ); /* Make room */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) lp->memcpy_f( skb->data, PKTBUF_ADDR(head), pkt_len );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) skb->protocol = eth_type_trans( skb, dev );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) netif_rx( skb );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) dev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) dev->stats.rx_bytes += pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
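/* Return the descriptor to the chip and advance to the next ring slot. */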
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) head->flag |= RMD1_OWN_CHIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) lp->cur_rx &= RX_RING_MOD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) /* From lance.c (Donald Becker): */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) /* We should check that at least two ring entries are free. If not,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) we should free one and mark stats->rx_dropped++. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) static int lance_close( struct net_device *dev )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) struct lance_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) struct lance_ioreg *IO = lp->iobase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) netif_stop_queue (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) AREG = CSR0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) DPRINTK( 2, ( "%s: Shutting down ethercard, status was %2.2x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) dev->name, DREG ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) /* We stop the LANCE here -- it occasionally polls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) memory if we don't. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) DREG = CSR0_STOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) /* Set or clear the multicast filter for this adaptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) num_addrs == -1 Promiscuous mode, receive all packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) num_addrs == 0 Normal mode, clear multicast list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) num_addrs > 0 Multicast mode, receive normal and MC packets, and do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) best-effort filtering.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) static void set_multicast_list( struct net_device *dev )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) struct lance_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) struct lance_ioreg *IO = lp->iobase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) /* Only possible if the board is already started */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) /* We take the simple way out and always enable promiscuous mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) DREG = CSR0_STOP; /* Temporarily stop the lance. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
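/* CSR15 is the chip's mode register (bit 15 enables promiscuous reception);
 * CSR8-CSR11 hold the 64-bit logical address filter used for multicast. */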
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) if (dev->flags & IFF_PROMISC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) /* Log any net taps. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) DPRINTK( 2, ( "%s: Promiscuous mode enabled.\n", dev->name ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) short multicast_table[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) int num_addrs = netdev_mc_count(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) /* We don't use the multicast table, but rely on upper-layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * filtering. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) memset( multicast_table, (num_addrs == 0) ? 0 : -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) sizeof(multicast_table) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) for( i = 0; i < 4; i++ )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) REGA( CSR8+i ) = multicast_table[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) REGA( CSR15 ) = 0; /* Unset promiscuous mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * Always set BSWP after a STOP as STOP puts it back into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * little endian mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) /* Resume normal operation and reset AREG to CSR0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) REGA( CSR0 ) = CSR0_IDON | CSR0_INEA | CSR0_STRT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /* This is needed for old RieblCards and possibly for new RieblCards */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) static int lance_set_mac_address( struct net_device *dev, void *addr )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) struct lance_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) struct sockaddr *saddr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (lp->cardtype != OLD_RIEBL && lp->cardtype != NEW_RIEBL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) /* Only possible while card isn't started */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) DPRINTK( 1, ( "%s: hwaddr can be set only while card isn't open.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) dev->name ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
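/* The hardware address goes into the init block byte-swapped within each
 * 16-bit word (the i^1 below); it is also written, together with a magic
 * value, to the RieblCard's reserved memory for future sessions. */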
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) eth_hw_addr_set(dev, saddr->sa_data); /* dev->dev_addr is const on current kernels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) for( i = 0; i < 6; i++ )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) lp->memcpy_f( RIEBL_HWADDR_ADDR, dev->dev_addr, 6 );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) /* set also the magic for future sessions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) *RIEBL_MAGIC_ADDR = RIEBL_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) #ifdef MODULE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) static struct net_device *atarilance_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
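/* When built as a module: probe the single supported card at load time and
 * unregister/free it again on unload. */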
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) static int __init atarilance_module_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) atarilance_dev = atarilance_probe(-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) return PTR_ERR_OR_ZERO(atarilance_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) static void __exit atarilance_module_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) unregister_netdev(atarilance_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) free_irq(atarilance_dev->irq, atarilance_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) free_netdev(atarilance_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) module_init(atarilance_module_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) module_exit(atarilance_module_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) #endif /* MODULE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) * Local variables:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * c-indent-level: 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * tab-width: 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) * End:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) */