^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) munged into HPPA boxen .
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) This driver is based upon 82596.c, original credits are below...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) but there were too many hoops which HP wants jumped through to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) keep this code in there in a sane manner.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) 3 primary sources of the mess --
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) 1) hppa needs *lots* of cacheline flushing to keep this kind of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) MMIO running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) 2) The 82596 needs to see all of its pointers as their physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) address. Thus virt_to_bus/bus_to_virt are *everywhere*.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) 3) The implementation HP is using seems to be significantly pickier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) about when and how the command and RX units are started. some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) command ordering was changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) Examination of the mach driver leads one to believe that there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) might be a saner way to pull this off... anyone who feels like a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) full rewrite can be my guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) 02/01/2000 Initial modifications for parisc by Helge Deller (deller@gmx.de)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) 03/02/2000 changes for better/correct(?) cache-flushing (deller)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) /* 82596.c: A generic 82596 ethernet driver for linux. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) Based on Apricot.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) Written 1994 by Mark Evans.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) This driver is for the Apricot 82596 bus-master interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) Modularised 12/94 Mark Evans
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) Modified to support the 82596 ethernet chips on 680x0 VME boards.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) by Richard Hirst <richard@sleepie.demon.co.uk>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) Renamed to be 82596.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) 980825: Changed to receive directly in to sk_buffs which are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) allocated at open() time. Eliminates copy on incoming frames
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) (small ones are still copied). Shared data now held in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) non-cached page, so we can run on 68060 in copyback mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) TBD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) * look at deferring rx frames rather than discarding (as per tulip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) * handle tx ring full as per tulip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) * performance test to tune rx_copybreak
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) Most of my modifications relate to the braindead big-endian
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) implementation by Intel. When the i596 is operating in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) 'big-endian' mode, it thinks a 32 bit value of 0x12345678
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) should be stored as 0x56781234. This is a real pain, when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) you have linked lists which are shared by the 680x0 and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) i596.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) Driver skeleton
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) Written 1993 by Donald Becker.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) Copyright 1993 United States Government as represented by the Director,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) National Security Agency. This software may only be used and distributed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) according to the terms of the GNU General Public License as modified by SRC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) incorporated herein by reference.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) The author may be reached as becker@scyld.com, or C/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) #include <linux/ioport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) #include <linux/etherdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) #include <linux/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) #include <linux/gfp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) /* DEBUG flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) #define DEB_INIT 0x0001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) #define DEB_PROBE 0x0002
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) #define DEB_SERIOUS 0x0004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) #define DEB_ERRORS 0x0008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) #define DEB_MULTI 0x0010
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) #define DEB_TDR 0x0020
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) #define DEB_OPEN 0x0040
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) #define DEB_RESET 0x0080
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) #define DEB_ADDCMD 0x0100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) #define DEB_STATUS 0x0200
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) #define DEB_STARTTX 0x0400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) #define DEB_RXADDR 0x0800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) #define DEB_TXADDR 0x1000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) #define DEB_RXFRAME 0x2000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) #define DEB_INTS 0x4000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #define DEB_STRUCT 0x8000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) #define DEB_ANY 0xffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) #define DEB(x, y) if (i596_debug & (x)) { y; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) * The MPU_PORT command allows direct access to the 82596. With PORT access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) * the following commands are available (p5-18). The 32-bit port command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) * must be word-swapped with the most significant word written first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) * This only applies to VME boards.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) #define PORT_RESET 0x00 /* reset 82596 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) #define PORT_SELFTEST 0x01 /* selftest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) #define PORT_ALTSCP 0x02 /* alternate SCB address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) #define PORT_ALTDUMP 0x03 /* Alternate DUMP address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) /* Copy frames shorter than rx_copybreak, otherwise pass on up in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) static int rx_copybreak = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) #define PKT_BUF_SZ 1536
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) #define MAX_MC_CNT 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) #define ISCP_BUSY 0x0001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) #define I596_NULL ((u32)0xffffffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) #define CMD_EOL 0x8000 /* The last command of the list, stop. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) #define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) #define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) #define CMD_FLEX 0x0008 /* Enable flexible memory model */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) enum commands {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) #define STAT_C 0x8000 /* Set to 0 after execution */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) #define STAT_B 0x4000 /* Command being executed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) #define STAT_OK 0x2000 /* Command executed ok */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) #define STAT_A 0x1000 /* Command aborted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) #define CUC_START 0x0100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) #define CUC_RESUME 0x0200
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) #define CUC_SUSPEND 0x0300
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) #define CUC_ABORT 0x0400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) #define RX_START 0x0010
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) #define RX_RESUME 0x0020
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) #define RX_SUSPEND 0x0030
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) #define RX_ABORT 0x0040
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) #define TX_TIMEOUT (HZ/20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) struct i596_reg {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) unsigned short porthi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) unsigned short portlo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) u32 ca;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) #define EOF 0x8000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) #define SIZE_MASK 0x3fff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) struct i596_tbd {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) unsigned short size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) unsigned short pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) u32 next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) u32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) u32 cache_pad[5]; /* Total 32 bytes... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) /* The command structure has two 'next' pointers; v_next is the address of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) * the next command as seen by the CPU, b_next is the address of the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) * command as seen by the 82596. The b_next pointer, as used by the 82596
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) * always references the status field of the next command, rather than the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) * v_next field, because the 82596 is unaware of v_next. It may seem more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) * logical to put v_next at the end of the structure, but we cannot do that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) * because the 82596 expects other fields to be there, depending on command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) * type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) struct i596_cmd {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) struct i596_cmd *v_next; /* Address from CPUs viewpoint */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) unsigned short status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) unsigned short command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) u32 b_next; /* Address from i596 viewpoint */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) struct tx_cmd {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) struct i596_cmd cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) u32 tbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) unsigned short size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) unsigned short pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) struct sk_buff *skb; /* So we can free it after tx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) #ifdef __LP64__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) u32 cache_pad[6]; /* Total 64 bytes... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) u32 cache_pad[1]; /* Total 32 bytes... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) struct tdr_cmd {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) struct i596_cmd cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) unsigned short status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) unsigned short pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) struct mc_cmd {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) struct i596_cmd cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) short mc_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) char mc_addrs[MAX_MC_CNT*6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) struct sa_cmd {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) struct i596_cmd cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) char eth_addr[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) struct cf_cmd {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) struct i596_cmd cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) char i596_config[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) struct i596_rfd {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) unsigned short stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) unsigned short cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) u32 b_next; /* Address from i596 viewpoint */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) u32 rbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) unsigned short count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) unsigned short size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) struct i596_rfd *v_next; /* Address from CPUs viewpoint */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) struct i596_rfd *v_prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) #ifndef __LP64__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) u32 cache_pad[2]; /* Total 32 bytes... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) struct i596_rbd {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) /* hardware data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) unsigned short count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) unsigned short zero1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) u32 b_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) u32 b_data; /* Address from i596 viewpoint */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) unsigned short size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) unsigned short zero2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) /* driver data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) struct i596_rbd *v_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) u32 b_addr; /* This rbd addr from i596 view */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) unsigned char *v_data; /* Address from CPUs viewpoint */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) /* Total 32 bytes... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) #ifdef __LP64__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) u32 cache_pad[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) /* These values as chosen so struct i596_dma fits in one page... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) #define TX_RING_SIZE 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) #define RX_RING_SIZE 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) struct i596_scb {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) unsigned short status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) unsigned short command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) u32 cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) u32 rfd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) u32 crc_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) u32 align_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) u32 resource_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) u32 over_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) u32 rcvdt_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) u32 short_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) unsigned short t_on;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) unsigned short t_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) struct i596_iscp {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) u32 stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) u32 scb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) struct i596_scp {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) u32 sysbus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) u32 pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) u32 iscp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) struct i596_dma {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) struct i596_scp scp __attribute__((aligned(32)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) volatile struct i596_iscp iscp __attribute__((aligned(32)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) volatile struct i596_scb scb __attribute__((aligned(32)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) struct sa_cmd sa_cmd __attribute__((aligned(32)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) struct cf_cmd cf_cmd __attribute__((aligned(32)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) struct tdr_cmd tdr_cmd __attribute__((aligned(32)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) struct mc_cmd mc_cmd __attribute__((aligned(32)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) struct i596_rfd rfds[RX_RING_SIZE] __attribute__((aligned(32)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) struct i596_rbd rbds[RX_RING_SIZE] __attribute__((aligned(32)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) struct tx_cmd tx_cmds[TX_RING_SIZE] __attribute__((aligned(32)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) struct i596_tbd tbds[TX_RING_SIZE] __attribute__((aligned(32)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) struct i596_private {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) struct i596_dma *dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) u32 stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) int last_restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) struct i596_rfd *rfd_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) struct i596_rbd *rbd_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) struct i596_cmd *cmd_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) struct i596_cmd *cmd_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) int cmd_backlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) u32 last_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) int next_tx_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) int options;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) spinlock_t lock; /* serialize access to chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) void __iomem *mpu_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) void __iomem *ca;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) static const char init_setup[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 0x8E, /* length, prefetch on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 0xC8, /* fifo to 8, monitor off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 0x80, /* don't save bad frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 0x2E, /* No source address insertion, 8 byte preamble */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 0x00, /* priority and backoff defaults */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 0x60, /* interframe spacing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 0x00, /* slot time LSB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 0xf2, /* slot time and retries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 0x00, /* promiscuous mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 0x00, /* collision detect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 0x40, /* minimum frame length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 0xff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 0x00,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 0x7f /* *multi IA */ };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) static int i596_open(struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) static irqreturn_t i596_interrupt(int irq, void *dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) static int i596_close(struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) static void print_eth(unsigned char *buf, char *str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) static void set_multicast_list(struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) static inline void ca(struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) static void mpu_port(struct net_device *dev, int c, dma_addr_t x);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) static int rx_ring_size = RX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) static int ticks_limit = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) static int max_cmd_backlog = TX_RING_SIZE-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) static void i596_poll_controller(struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) static inline dma_addr_t virt_to_dma(struct i596_private *lp, volatile void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) return lp->dma_addr + ((unsigned long)v - (unsigned long)lp->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) #ifdef NONCOHERENT_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) static inline void dma_sync_dev(struct net_device *ndev, volatile void *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) dma_sync_single_for_device(ndev->dev.parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) virt_to_dma(netdev_priv(ndev), addr), len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) static inline void dma_sync_cpu(struct net_device *ndev, volatile void *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) dma_sync_single_for_cpu(ndev->dev.parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) virt_to_dma(netdev_priv(ndev), addr), len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) static inline void dma_sync_dev(struct net_device *ndev, volatile void *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) static inline void dma_sync_cpu(struct net_device *ndev, volatile void *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) #endif /* NONCOHERENT_DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) while (--delcnt && dma->iscp.stat) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) if (!delcnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) dev->name, str, SWAP16(dma->iscp.stat));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) while (--delcnt && dma->scb.command) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) if (!delcnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) dev->name, str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) SWAP16(dma->scb.status),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) SWAP16(dma->scb.command));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) static void i596_display_data(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) struct i596_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) struct i596_dma *dma = lp->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) struct i596_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) struct i596_rfd *rfd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) struct i596_rbd *rbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) printk(KERN_DEBUG "lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) " .cmd = %08x, .rfd = %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) SWAP16(dma->scb.cmd), SWAP32(dma->scb.rfd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) printk(KERN_DEBUG " errors: crc %x, align %x, resource %x,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) " over %x, rcvdt %x, short %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) cmd = lp->cmd_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) while (cmd != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) "cmd at %p, .status = %04x, .command = %04x,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) " .b_next = %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) cmd, SWAP16(cmd->status), SWAP16(cmd->command),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) SWAP32(cmd->b_next));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) cmd = cmd->v_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) rfd = lp->rfd_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) printk(KERN_DEBUG "rfd_head = %p\n", rfd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) " %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) " count %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) SWAP32(rfd->b_next), SWAP32(rfd->rbd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) SWAP16(rfd->count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) rfd = rfd->v_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) } while (rfd != lp->rfd_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) rbd = lp->rbd_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) printk(KERN_DEBUG "rbd_head = %p\n", rbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) " %p .count %04x, b_next %08x, b_data %08x,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) " size %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) rbd, SWAP16(rbd->count), SWAP32(rbd->b_next),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) SWAP32(rbd->b_data), SWAP16(rbd->size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) rbd = rbd->v_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) } while (rbd != lp->rbd_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) dma_sync_cpu(dev, dma, sizeof(struct i596_dma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) static inline int init_rx_bufs(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) struct i596_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) struct i596_dma *dma = lp->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) struct i596_rfd *rfd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) struct i596_rbd *rbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) /* First build the Receive Buffer Descriptor List */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) if (skb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) dma_addr = dma_map_single(dev->dev.parent, skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) PKT_BUF_SZ, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) rbd->v_next = rbd+1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) rbd->b_addr = SWAP32(virt_to_dma(lp, rbd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) rbd->skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) rbd->v_data = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) rbd->b_data = SWAP32(dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) rbd->size = SWAP16(PKT_BUF_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) lp->rbd_head = dma->rbds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) rbd = dma->rbds + rx_ring_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) rbd->v_next = dma->rbds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) /* Now build the Receive Frame Descriptor List */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) rfd->rbd = I596_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) rfd->v_next = rfd+1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) rfd->v_prev = rfd-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) rfd->cmd = SWAP16(CMD_FLEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) lp->rfd_head = dma->rfds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) rfd = dma->rfds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) rfd->v_prev = dma->rfds + rx_ring_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) rfd = dma->rfds + rx_ring_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) rfd->v_next = dma->rfds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) dma_sync_dev(dev, dma, sizeof(struct i596_dma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) static inline void remove_rx_bufs(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) struct i596_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) struct i596_rbd *rbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) if (rbd->skb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) dma_unmap_single(dev->dev.parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) (dma_addr_t)SWAP32(rbd->b_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) PKT_BUF_SZ, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) dev_kfree_skb(rbd->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) static void rebuild_rx_bufs(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) struct i596_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) struct i596_dma *dma = lp->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) /* Ensure rx frame/buffer descriptors are tidy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) for (i = 0; i < rx_ring_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) dma->rfds[i].rbd = I596_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) dma->rfds[i].cmd = SWAP16(CMD_FLEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) lp->rfd_head = dma->rfds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) lp->rbd_head = dma->rbds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) dma_sync_dev(dev, dma, sizeof(struct i596_dma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) static int init_i596_mem(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) struct i596_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) struct i596_dma *dma = lp->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) mpu_port(dev, PORT_RESET, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) udelay(100); /* Wait 100us - seems to help */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) /* change the scp address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) lp->last_cmd = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) dma->scp.sysbus = SYSBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) dma->iscp.stat = SWAP32(ISCP_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) lp->cmd_backlog = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) lp->cmd_head = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) dma->scb.cmd = I596_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) dma_sync_dev(dev, &(dma->scp), sizeof(struct i596_scp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) dma_sync_dev(dev, &(dma->iscp), sizeof(struct i596_iscp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) ca(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) if (wait_istat(dev, dma, 1000, "initialization timed out"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) DEB(DEB_INIT, printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) "%s: i82596 initialization successful\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) dev->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) /* Ensure rx frame/buffer descriptors are tidy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) rebuild_rx_bufs(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) dma->scb.command = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) DEB(DEB_INIT, printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) "%s: queuing CmdConfigure\n", dev->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) memcpy(dma->cf_cmd.i596_config, init_setup, 14);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) dma_sync_dev(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) i596_add_cmd(dev, &dma->cf_cmd.cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) dma_sync_dev(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) i596_add_cmd(dev, &dma->sa_cmd.cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) dma_sync_dev(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) i596_add_cmd(dev, &dma->tdr_cmd.cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) spin_lock_irqsave (&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) spin_unlock_irqrestore (&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) goto failed_free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) dma->scb.command = SWAP16(RX_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) ca(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) spin_unlock_irqrestore (&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) goto failed_free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) DEB(DEB_INIT, printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) "%s: Receive unit started OK\n", dev->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) failed_free_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) mpu_port(dev, PORT_RESET, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) static inline int i596_rx(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) struct i596_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) struct i596_rfd *rfd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) struct i596_rbd *rbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) int frames = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) DEB(DEB_RXFRAME, printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) "i596_rx(), rfd_head %p, rbd_head %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) lp->rfd_head, lp->rbd_head));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) rfd = lp->rfd_head; /* Ref next frame to check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) dma_sync_cpu(dev, rfd, sizeof(struct i596_rfd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) while (rfd->stat & SWAP16(STAT_C)) { /* Loop while complete frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) if (rfd->rbd == I596_NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) rbd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) else if (rfd->rbd == lp->rbd_head->b_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) rbd = lp->rbd_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) dma_sync_cpu(dev, rbd, sizeof(struct i596_rbd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) /* XXX Now what? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) rbd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) DEB(DEB_RXFRAME, printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) " rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) rfd, rfd->rbd, rfd->stat));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) /* a good frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) int pkt_len = SWAP16(rbd->count) & 0x3fff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) struct sk_buff *skb = rbd->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) int rx_in_place = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) frames++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) /* Check if the packet is long enough to just accept
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) * without copying to a properly sized skbuff.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) if (pkt_len > rx_copybreak) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) struct sk_buff *newskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) dma_unmap_single(dev->dev.parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) (dma_addr_t)SWAP32(rbd->b_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) PKT_BUF_SZ, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) /* Get fresh skbuff to replace filled one. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) newskb = netdev_alloc_skb_ip_align(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) PKT_BUF_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) if (newskb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) skb = NULL; /* drop pkt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) goto memory_squeeze;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) /* Pass up the skb already on the Rx ring. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) skb_put(skb, pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) rx_in_place = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) rbd->skb = newskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) dma_addr = dma_map_single(dev->dev.parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) newskb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) PKT_BUF_SZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) rbd->v_data = newskb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) rbd->b_data = SWAP32(dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) dma_sync_dev(dev, rbd, sizeof(struct i596_rbd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) skb = netdev_alloc_skb_ip_align(dev, pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) memory_squeeze:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) if (skb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) /* XXX tulip.c can defer packets here!! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) dev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) if (!rx_in_place) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) /* 16 byte align the data fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) dma_sync_single_for_cpu(dev->dev.parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) (dma_addr_t)SWAP32(rbd->b_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) PKT_BUF_SZ, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) skb_put_data(skb, rbd->v_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) dma_sync_single_for_device(dev->dev.parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) (dma_addr_t)SWAP32(rbd->b_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) PKT_BUF_SZ, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) skb->len = pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) skb->protocol = eth_type_trans(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) dev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) dev->stats.rx_bytes += pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) DEB(DEB_ERRORS, printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) "%s: Error, rfd.stat = 0x%04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) dev->name, rfd->stat));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) if (rfd->stat & SWAP16(0x0100))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) dev->stats.collisions++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (rfd->stat & SWAP16(0x8000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) dev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (rfd->stat & SWAP16(0x0001))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) dev->stats.rx_over_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) if (rfd->stat & SWAP16(0x0002))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) dev->stats.rx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (rfd->stat & SWAP16(0x0004))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) dev->stats.rx_frame_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) if (rfd->stat & SWAP16(0x0008))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) dev->stats.rx_crc_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (rfd->stat & SWAP16(0x0010))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) dev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) /* Clear the buffer descriptor count and EOF + F flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) rbd->count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) lp->rbd_head = rbd->v_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) dma_sync_dev(dev, rbd, sizeof(struct i596_rbd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) /* Tidy the frame descriptor, marking it as end of list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) rfd->rbd = I596_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) rfd->stat = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) rfd->count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) /* Update record of next frame descriptor to process */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) lp->dma->scb.rfd = rfd->b_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) lp->rfd_head = rfd->v_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) dma_sync_dev(dev, rfd, sizeof(struct i596_rfd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) /* Remove end-of-list from old end descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) rfd->v_prev->cmd = SWAP16(CMD_FLEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) dma_sync_dev(dev, rfd->v_prev, sizeof(struct i596_rfd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) rfd = lp->rfd_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) dma_sync_cpu(dev, rfd, sizeof(struct i596_rfd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) struct i596_cmd *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) while (lp->cmd_head != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) ptr = lp->cmd_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) lp->cmd_head = ptr->v_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) lp->cmd_backlog--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) switch (SWAP16(ptr->command) & 0x7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) case CmdTx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct sk_buff *skb = tx_cmd->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) dma_unmap_single(dev->dev.parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) tx_cmd->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) skb->len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) dev->stats.tx_aborted_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) ptr->v_next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) ptr->b_next = I596_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) tx_cmd->cmd.command = 0; /* Mark as free */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) ptr->v_next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) ptr->b_next = I596_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) dma_sync_dev(dev, ptr, sizeof(struct i596_cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) lp->dma->scb.cmd = I596_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) spin_lock_irqsave (&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) wait_cmd(dev, lp->dma, 100, "i596_reset timed out");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /* FIXME: this command might cause an lpmc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) ca(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) /* wait for shutdown */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) spin_unlock_irqrestore (&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) i596_cleanup_cmd(dev, lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) i596_rx(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) init_i596_mem(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) struct i596_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) struct i596_dma *dma = lp->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) lp->cmd_head));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) cmd->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) cmd->v_next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) cmd->b_next = I596_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) dma_sync_dev(dev, cmd, sizeof(struct i596_cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) spin_lock_irqsave (&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (lp->cmd_head != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) lp->cmd_tail->v_next = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) dma_sync_dev(dev, lp->cmd_tail, sizeof(struct i596_cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) lp->cmd_head = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) dma->scb.command = SWAP16(CUC_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) ca(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) lp->cmd_tail = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) lp->cmd_backlog++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) spin_unlock_irqrestore (&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (lp->cmd_backlog > max_cmd_backlog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) unsigned long tickssofar = jiffies - lp->last_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) if (tickssofar < ticks_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) printk(KERN_ERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) "%s: command unit timed out, status resetting.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) i596_reset(dev, lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) static int i596_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) DEB(DEB_OPEN, printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) "%s: i596_open() irq %d.\n", dev->name, dev->irq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (init_rx_bufs(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (init_i596_mem(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) printk(KERN_ERR "%s: Failed to init memory\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) goto out_remove_rx_bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) out_remove_rx_bufs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) remove_rx_bufs(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) struct i596_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) /* Transmitter timeout, serious problems. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) DEB(DEB_ERRORS, printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) "%s: transmit timed out, status resetting.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) dev->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) /* Try to restart the adaptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (lp->last_restart == dev->stats.tx_packets) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) /* Shutdown and restart */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) i596_reset (dev, lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) /* Issue a channel attention signal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) lp->dma->scb.command = SWAP16(CUC_START | RX_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) ca (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) lp->last_restart = dev->stats.tx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) netif_trans_update(dev); /* prevent tx timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) netif_wake_queue (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) struct i596_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) struct tx_cmd *tx_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) struct i596_tbd *tbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) short length = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) DEB(DEB_STARTTX, printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) "%s: i596_start_xmit(%x,%p) called\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) dev->name, skb->len, skb->data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (length < ETH_ZLEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (skb_padto(skb, ETH_ZLEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) length = ETH_ZLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) tbd = lp->dma->tbds + lp->next_tx_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (tx_cmd->cmd.command) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) DEB(DEB_ERRORS, printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) "%s: xmit ring full, dropping packet.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) dev->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) dev->stats.tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (++lp->next_tx_cmd == TX_RING_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) lp->next_tx_cmd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) tbd->next = I596_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) tx_cmd->skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) tx_cmd->pad = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) tx_cmd->size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) tbd->pad = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) tbd->size = SWAP16(EOF | length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) skb->len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) tbd->data = SWAP32(tx_cmd->dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) dma_sync_dev(dev, tx_cmd, sizeof(struct tx_cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) dma_sync_dev(dev, tbd, sizeof(struct i596_tbd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) i596_add_cmd(dev, &tx_cmd->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) dev->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) dev->stats.tx_bytes += length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) static void print_eth(unsigned char *add, char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) add, add + 6, add, add[12], add[13], str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) static const struct net_device_ops i596_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) .ndo_open = i596_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) .ndo_stop = i596_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) .ndo_start_xmit = i596_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) .ndo_set_rx_mode = set_multicast_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) .ndo_tx_timeout = i596_tx_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) .ndo_validate_addr = eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) .ndo_set_mac_address = eth_mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) .ndo_poll_controller = i596_poll_controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) static int i82596_probe(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) struct i596_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) /* This lot is ensure things have been cache line aligned. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) BUILD_BUG_ON(sizeof(struct i596_rbd) & 31);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) BUILD_BUG_ON(sizeof(struct tx_cmd) & 31);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) #ifndef __LP64__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) BUILD_BUG_ON(sizeof(struct i596_dma) > 4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (!dev->base_addr || !dev->irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) dev->netdev_ops = &i596_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) dev->watchdog_timeo = TX_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) memset(lp->dma, 0, sizeof(struct i596_dma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) lp->dma->scb.command = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) lp->dma->scb.cmd = I596_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) lp->dma->scb.rfd = I596_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) spin_lock_init(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) dma_sync_dev(dev, lp->dma, sizeof(struct i596_dma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) ret = register_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) dev->name, dev->base_addr, dev->dev_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) dev->irq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) DEB(DEB_INIT, printk(KERN_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) dev->name, lp->dma, (int)sizeof(struct i596_dma),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) &lp->dma->scb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) static void i596_poll_controller(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) disable_irq(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) i596_interrupt(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) enable_irq(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) static irqreturn_t i596_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) struct net_device *dev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) struct i596_private *lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) struct i596_dma *dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) unsigned short status, ack_cmd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) dma = lp->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) spin_lock (&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) status = SWAP16(dma->scb.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) DEB(DEB_INTS, printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) dev->name, dev->irq, status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) ack_cmd = status & 0xf000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (!ack_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) DEB(DEB_ERRORS, printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) "%s: interrupt with no events\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) dev->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) spin_unlock (&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) if ((status & 0x8000) || (status & 0x2000)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) struct i596_cmd *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if ((status & 0x8000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) DEB(DEB_INTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) "%s: i596 interrupt completed command.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) dev->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if ((status & 0x2000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) DEB(DEB_INTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) "%s: i596 interrupt command unit inactive %x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) dev->name, status & 0x0700));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) while (lp->cmd_head != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) dma_sync_cpu(dev, lp->cmd_head, sizeof(struct i596_cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (!(lp->cmd_head->status & SWAP16(STAT_C)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) ptr = lp->cmd_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) DEB(DEB_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) "cmd_head->status = %04x, ->command = %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) SWAP16(lp->cmd_head->status),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) SWAP16(lp->cmd_head->command)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) lp->cmd_head = ptr->v_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) lp->cmd_backlog--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) switch (SWAP16(ptr->command) & 0x7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) case CmdTx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) struct sk_buff *skb = tx_cmd->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (ptr->status & SWAP16(STAT_OK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) DEB(DEB_TXADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) print_eth(skb->data, "tx-done"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (ptr->status & SWAP16(0x0020))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) dev->stats.collisions++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (!(ptr->status & SWAP16(0x0040)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) dev->stats.tx_heartbeat_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (ptr->status & SWAP16(0x0400))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) dev->stats.tx_carrier_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (ptr->status & SWAP16(0x0800))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) dev->stats.collisions++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (ptr->status & SWAP16(0x1000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) dev->stats.tx_aborted_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) dma_unmap_single(dev->dev.parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) tx_cmd->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) skb->len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) dev_consume_skb_irq(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) tx_cmd->cmd.command = 0; /* Mark free */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) case CmdTDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (status & 0x8000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) DEB(DEB_ANY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) printk(KERN_DEBUG "%s: link ok.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) dev->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (status & 0x4000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) printk(KERN_ERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) "%s: Transceiver problem.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (status & 0x2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) printk(KERN_ERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) "%s: Termination problem.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) if (status & 0x1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) printk(KERN_ERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) "%s: Short circuit.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) DEB(DEB_TDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) printk(KERN_DEBUG "%s: Time %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) dev->name, status & 0x07ff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) case CmdConfigure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) * Zap command so set_multicast_list() know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * it is free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) ptr->command = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) ptr->v_next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) ptr->b_next = I596_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) dma_sync_dev(dev, ptr, sizeof(struct i596_cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) lp->last_cmd = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) /* This mess is arranging that only the last of any outstanding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) * commands has the interrupt bit set. Should probably really
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) * only add to the cmd queue when the CU is stopped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) ptr = lp->cmd_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) struct i596_cmd *prev = ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) ptr->command &= SWAP16(0x1fff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) ptr = ptr->v_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) dma_sync_dev(dev, prev, sizeof(struct i596_cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (lp->cmd_head != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) ack_cmd |= CUC_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if ((status & 0x1000) || (status & 0x4000)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if ((status & 0x4000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) DEB(DEB_INTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) "%s: i596 interrupt received a frame.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) dev->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) i596_rx(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) /* Only RX_START if stopped - RGH 07-07-96 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (status & 0x1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) DEB(DEB_ERRORS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) "%s: i596 interrupt receive unit inactive, status 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) dev->name, status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) ack_cmd |= RX_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) dev->stats.rx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) rebuild_rx_bufs(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) dma->scb.command = SWAP16(ack_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) /* DANGER: I suspect that some kind of interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) acknowledgement aside from acking the 82596 might be needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) here... but it's running acceptably without */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) ca(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) spin_unlock (&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) static int i596_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) struct i596_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) DEB(DEB_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) "%s: Shutting down ethercard, status was %4.4x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) dev->name, SWAP16(lp->dma->scb.status)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) wait_cmd(dev, lp->dma, 100, "close1 timed out");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) dma_sync_dev(dev, &lp->dma->scb, sizeof(struct i596_scb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) ca(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) wait_cmd(dev, lp->dma, 100, "close2 timed out");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) DEB(DEB_STRUCT, i596_display_data(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) i596_cleanup_cmd(dev, lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) remove_rx_bufs(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) * Set or clear the multicast filter for this adaptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) static void set_multicast_list(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) struct i596_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) struct i596_dma *dma = lp->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) int config = 0, cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) DEB(DEB_MULTI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) dev->name, netdev_mc_count(dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) dev->flags & IFF_PROMISC ? "ON" : "OFF",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if ((dev->flags & IFF_PROMISC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) !(dma->cf_cmd.i596_config[8] & 0x01)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) dma->cf_cmd.i596_config[8] |= 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) config = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (!(dev->flags & IFF_PROMISC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) (dma->cf_cmd.i596_config[8] & 0x01)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) dma->cf_cmd.i596_config[8] &= ~0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) config = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) if ((dev->flags & IFF_ALLMULTI) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) (dma->cf_cmd.i596_config[11] & 0x20)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) dma->cf_cmd.i596_config[11] &= ~0x20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) config = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (!(dev->flags & IFF_ALLMULTI) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) !(dma->cf_cmd.i596_config[11] & 0x20)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) dma->cf_cmd.i596_config[11] |= 0x20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) config = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) if (config) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) if (dma->cf_cmd.cmd.command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) printk(KERN_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) "%s: config change request already queued\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) dma_sync_dev(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) i596_add_cmd(dev, &dma->cf_cmd.cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) cnt = netdev_mc_count(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (cnt > MAX_MC_CNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) cnt = MAX_MC_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) printk(KERN_NOTICE "%s: Only %d multicast addresses supported",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) dev->name, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) if (!netdev_mc_empty(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) unsigned char *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) struct mc_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) cmd = &dma->mc_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) cmd->cmd.command = SWAP16(CmdMulticastList);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) cp = cmd->mc_addrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) netdev_for_each_mc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) if (!cnt--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) memcpy(cp, ha->addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (i596_debug > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) DEB(DEB_MULTI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) "%s: Adding address %pM\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) dev->name, cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) cp += ETH_ALEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) dma_sync_dev(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) i596_add_cmd(dev, &cmd->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }