/* cs89x0.c: A Crystal Semiconductor (Now Cirrus Logic) CS89[02]0
 * driver for linux.
 * Written 1996 by Russell Nelson, with reference to skeleton.c
 * written 1993-1994 by Donald Becker.
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 * The author may be reached at nelson@crynwr.com, Crynwr
 * Software, 521 Pleasant Valley Rd., Potsdam, NY 13676
 *
 * Other contributors:
 *	Mike Cruse        : mcruse@cti-ltd.com
 *	Russ Nelson
 *	Melody Lee        : ethernet@crystal.cirrus.com
 *	Alan Cox
 *	Andrew Morton
 *	Oskar Schirmer    : oskar@scara.com
 *	Deepak Saxena     : dsaxena@plexity.net
 *	Dmitry Pervushin  : dpervushin@ru.mvista.com
 *	Deepak Saxena     : dsaxena@plexity.net
 *	Domenico Andreoli : cavokz@gmail.com
 */


/*
 * Set this to zero to disable DMA code
 *
 * Note that even if DMA is turned off we still support the 'dma' and 'use_dma'
 * module options so we don't break any startup scripts.
 */
#ifndef CONFIG_ISA_DMA_API
#define ALLOW_DMA	0
#else
#define ALLOW_DMA	1
#endif

/*
 * Set this to zero to remove all the debug statements via
 * dead code elimination
 */
#define DEBUGGING	1

/* Sources:
 *	Crynwr packet driver epktisa.
 *	Crystal Semiconductor data sheets.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/printk.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>

#include <asm/irq.h>
#include <linux/atomic.h>
#if ALLOW_DMA
#include <asm/dma.h>
#endif

#include "cs89x0.h"

#define cs89_dbg(val, level, fmt, ...)			\
do {							\
	if (val <= net_debug)				\
		pr_##level(fmt, ##__VA_ARGS__);		\
} while (0)
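
/*
 * Usage sketch (illustrative only; the message text below is made up):
 *
 *	cs89_dbg(2, info, "%s: media detected\n", dev->name);
 *
 * expands to pr_info(...) and fires only when net_debug >= 2.  With
 * DEBUGGING set to 0, net_debug becomes the constant 0 and the compiler
 * discards every cs89_dbg() call as dead code.
 */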

static char version[] __initdata =
	"v2.4.3-pre1 Russell Nelson <nelson@crynwr.com>, Andrew Morton";

#define DRV_NAME "cs89x0"

/* First, a few definitions that the brave might change.
 * A zero-terminated list of I/O addresses to be probed. Some special flags:
 *	Addr & 1 = Read back the address port, look for signature and reset
 *		   the page window before probing
 *	Addr & 3 = Reset the page window and probe
 * The CLPS eval board has the Cirrus chip at 0x80090300, in ARM I/O space,
 * but it is possible that a Cirrus board could be plugged into the ISA
 * slots.
 */
/* The cs8900 has 4 IRQ pins, software selectable. cs8900_irq_map maps
 * them to system IRQ numbers. This mapping is card specific and is set to
 * the configuration of the Cirrus Eval board for this chip.
 */
#ifndef CONFIG_CS89x0_PLATFORM
static unsigned int netcard_portlist[] __used __initdata = {
	0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240,
	0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0
};
static unsigned int cs8900_irq_map[] = {
	10, 11, 12, 5
};
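
/*
 * Illustration of the mapping above (specific to the Cirrus eval board
 * wiring): system IRQ 10 corresponds to chip interrupt pin 0, IRQ 11 to
 * pin 1, IRQ 12 to pin 2 and IRQ 5 to pin 3.  write_irq() searches this
 * table for the requested system IRQ and programs the resulting pin
 * index into PP_CS8900_ISAINT.
 */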
#endif

#if DEBUGGING
static unsigned int net_debug = DEBUGGING;
#else
#define net_debug 0	/* gcc will remove all the debug code for us */
#endif

/* The number of low I/O ports used by the ethercard. */
#define NETCARD_IO_EXTENT	16

/* we allow the user to override various values normally set in the EEPROM */
#define FORCE_RJ45	0x0001	/* pick one of these three */
#define FORCE_AUI	0x0002
#define FORCE_BNC	0x0004

#define FORCE_AUTO	0x0010	/* pick one of these three */
#define FORCE_HALF	0x0020
#define FORCE_FULL	0x0030
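
/*
 * Hypothetical example of how these flags combine: forcing twisted pair
 * at full duplex would be lp->force = FORCE_RJ45 | FORCE_FULL.  The low
 * nibble selects the medium and the 0xf0 nibble the duplex mode (see
 * the "lp->force & 0xf0" switches in detect_tp()).
 */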

/* Information that needs to be kept for each board. */
struct net_local {
	int chip_type;		/* one of: CS8900, CS8920, CS8920M */
	char chip_revision;	/* revision letter of the chip ('A'...) */
	int send_cmd;		/* the proper send command: TX_NOW, TX_AFTER_381, or TX_AFTER_ALL */
	int auto_neg_cnf;	/* auto-negotiation word from EEPROM */
	int adapter_cnf;	/* adapter configuration from EEPROM */
	int isa_config;		/* ISA configuration from EEPROM */
	int irq_map;		/* IRQ map from EEPROM */
	int rx_mode;		/* what mode are we in? 0, RX_MULTCAST_ACCEPT, or RX_ALL_ACCEPT */
	int curr_rx_cfg;	/* a copy of PP_RxCFG */
	int linectl;		/* either 0 or LOW_RX_SQUELCH, depending on configuration. */
	int send_underrun;	/* keep track of how many underruns in a row we get */
	int force;		/* force various values; see FORCE* above. */
	spinlock_t lock;
	void __iomem *virt_addr;	/* CS89x0 virtual address. */
#if ALLOW_DMA
	int use_dma;		/* Flag: we're using dma */
	int dma;		/* DMA channel */
	int dmasize;		/* 16 or 64 */
	unsigned char *dma_buff;	/* points to the beginning of the buffer */
	unsigned char *end_dma_buff;	/* points to the end of the buffer */
	unsigned char *rx_dma_ptr;	/* points to the next packet */
#endif
};

/* Example routines you must write ;->. */
#define tx_done(dev) 1

/*
 * Permit 'cs89x0_dma=N' in the kernel boot environment
 */
#if !defined(MODULE)
#if ALLOW_DMA
static int g_cs89x0_dma;

static int __init dma_fn(char *str)
{
	g_cs89x0_dma = simple_strtol(str, NULL, 0);
	return 1;
}

__setup("cs89x0_dma=", dma_fn);
#endif	/* ALLOW_DMA */

static int g_cs89x0_media__force;

static int __init media_fn(char *str)
{
	if (!strcmp(str, "rj45"))
		g_cs89x0_media__force = FORCE_RJ45;
	else if (!strcmp(str, "aui"))
		g_cs89x0_media__force = FORCE_AUI;
	else if (!strcmp(str, "bnc"))
		g_cs89x0_media__force = FORCE_BNC;

	return 1;
}

__setup("cs89x0_media=", media_fn);
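
/*
 * Example boot parameters (built-in, non-modular kernels only):
 *
 *	cs89x0_media=rj45 cs89x0_dma=5
 *
 * forces the RJ-45 transceiver and, when ALLOW_DMA is enabled, asks for
 * ISA DMA channel 5.  The accepted media strings are "rj45", "aui" and
 * "bnc", exactly as parsed by media_fn() above; channel 5 is just an
 * illustrative value.
 */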
#endif

static void readwords(struct net_local *lp, int portno, void *buf, int length)
{
	u8 *buf8 = (u8 *)buf;

	do {
		u16 tmp16;

		tmp16 = ioread16(lp->virt_addr + portno);
		*buf8++ = (u8)tmp16;
		*buf8++ = (u8)(tmp16 >> 8);
	} while (--length);
}

static void writewords(struct net_local *lp, int portno, void *buf, int length)
{
	u8 *buf8 = (u8 *)buf;

	do {
		u16 tmp16;

		tmp16 = *buf8++;
		tmp16 |= (*buf8++) << 8;
		iowrite16(tmp16, lp->virt_addr + portno);
	} while (--length);
}
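
/*
 * Note that both helpers take a word count, not a byte count, and the
 * do/while assumes at least one word.  A representative call is the one
 * used for the test packet further down:
 *
 *	writewords(lp, TX_FRAME_PORT, test_packet, (ETH_ZLEN + 1) >> 1);
 *
 * which streams ETH_ZLEN bytes into the transmit FIFO as 16-bit words.
 */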

static u16
readreg(struct net_device *dev, u16 regno)
{
	struct net_local *lp = netdev_priv(dev);

	iowrite16(regno, lp->virt_addr + ADD_PORT);
	return ioread16(lp->virt_addr + DATA_PORT);
}

static void
writereg(struct net_device *dev, u16 regno, u16 value)
{
	struct net_local *lp = netdev_priv(dev);

	iowrite16(regno, lp->virt_addr + ADD_PORT);
	iowrite16(value, lp->virt_addr + DATA_PORT);
}
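
/*
 * PacketPage access is indirect: readreg()/writereg() first write the
 * register offset to ADD_PORT and then move the data through DATA_PORT.
 * So, for example, writereg(dev, PP_RxCFG, lp->curr_rx_cfg) selects the
 * receiver configuration register and then writes the cached value.
 */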

static int __init
wait_eeprom_ready(struct net_device *dev)
{
	unsigned long timeout = jiffies;
	/* check to see if the EEPROM is ready; it is ready when SI_BUSY
	 * in PP_SelfST is clear, and the timeout guards against the bit
	 * never clearing
	 */
	while (readreg(dev, PP_SelfST) & SI_BUSY)
		if (time_after_eq(jiffies, timeout + 40))
			return -1;
	return 0;
}

static int __init
get_eeprom_data(struct net_device *dev, int off, int len, int *buffer)
{
	int i;

	cs89_dbg(3, info, "EEPROM data from %x for %x:", off, len);
	for (i = 0; i < len; i++) {
		if (wait_eeprom_ready(dev) < 0)
			return -1;
		/* Now send the EEPROM read command and EEPROM location to read */
		writereg(dev, PP_EECMD, (off + i) | EEPROM_READ_CMD);
		if (wait_eeprom_ready(dev) < 0)
			return -1;
		buffer[i] = readreg(dev, PP_EEData);
		cs89_dbg(3, cont, " %04x", buffer[i]);
	}
	cs89_dbg(3, cont, "\n");
	return 0;
}

static int __init
get_eeprom_cksum(int off, int len, int *buffer)
{
	int i, cksum;

	cksum = 0;
	for (i = 0; i < len; i++)
		cksum += buffer[i];
	cksum &= 0xffff;
	if (cksum == 0)
		return 0;
	return -1;
}
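
/*
 * Worked example for the check above: EEPROM words summing to 0x2ffff
 * fail (0x2ffff & 0xffff == 0xffff), while words summing to 0x30000
 * pass, i.e. the checksum word is whatever value brings the 16-bit sum
 * of the whole block back to zero.
 */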

static void
write_irq(struct net_device *dev, int chip_type, int irq)
{
	int i;

	if (chip_type == CS8900) {
#ifndef CONFIG_CS89x0_PLATFORM
		/* Search the mapping table for the corresponding IRQ pin. */
		for (i = 0; i != ARRAY_SIZE(cs8900_irq_map); i++)
			if (cs8900_irq_map[i] == irq)
				break;
		/* Not found */
		if (i == ARRAY_SIZE(cs8900_irq_map))
			i = 3;
#else
		/* INTRQ0 pin is used for interrupt generation. */
		i = 0;
#endif
		writereg(dev, PP_CS8900_ISAINT, i);
	} else {
		writereg(dev, PP_CS8920_ISAINT, irq);
	}
}

static void
count_rx_errors(int status, struct net_device *dev)
{
	dev->stats.rx_errors++;
	if (status & RX_RUNT)
		dev->stats.rx_length_errors++;
	if (status & RX_EXTRA_DATA)
		dev->stats.rx_length_errors++;
	if ((status & RX_CRC_ERROR) && !(status & (RX_EXTRA_DATA | RX_RUNT)))
		/* per str 172 */
		dev->stats.rx_crc_errors++;
	if (status & RX_DRIBBLE)
		dev->stats.rx_frame_errors++;
}

/*********************************
 * This page contains DMA routines
 *********************************/

#if ALLOW_DMA

#define dma_page_eq(ptr1, ptr2) ((long)(ptr1) >> 17 == (long)(ptr2) >> 17)
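
/*
 * dma_page_eq() compares addresses in 128 KiB (1 << 17) granules, the
 * page size seen by 16-bit ISA DMA channels.  For example, 0x1000000
 * and 0x101ffff both shift down to 0x80 and therefore "match", while
 * 0x1020000 shifts to 0x81 and does not - the idea being to verify that
 * a DMA buffer does not straddle such a page boundary.
 */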

static void
get_dma_channel(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);

	if (lp->dma) {
		dev->dma = lp->dma;
		lp->isa_config |= ISA_RxDMA;
	} else {
		if ((lp->isa_config & ANY_ISA_DMA) == 0)
			return;
		dev->dma = lp->isa_config & DMA_NO_MASK;
		if (lp->chip_type == CS8900)
			dev->dma += 5;
		if (dev->dma < 5 || dev->dma > 7) {
			lp->isa_config &= ~ANY_ISA_DMA;
			return;
		}
	}
}

static void
write_dma(struct net_device *dev, int chip_type, int dma)
{
	struct net_local *lp = netdev_priv(dev);

	if ((lp->isa_config & ANY_ISA_DMA) == 0)
		return;
	if (chip_type == CS8900)
		writereg(dev, PP_CS8900_ISADMA, dma - 5);
	else
		writereg(dev, PP_CS8920_ISADMA, dma);
}

static void
set_dma_cfg(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);

	if (lp->use_dma) {
		if ((lp->isa_config & ANY_ISA_DMA) == 0) {
			cs89_dbg(3, err, "set_dma_cfg(): no DMA\n");
			return;
		}
		if (lp->isa_config & ISA_RxDMA) {
			lp->curr_rx_cfg |= RX_DMA_ONLY;
			cs89_dbg(3, info, "set_dma_cfg(): RX_DMA_ONLY\n");
		} else {
			lp->curr_rx_cfg |= AUTO_RX_DMA;	/* not that we support it... */
			cs89_dbg(3, info, "set_dma_cfg(): AUTO_RX_DMA\n");
		}
	}
}

static int
dma_bufcfg(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);

	if (lp->use_dma)
		return (lp->isa_config & ANY_ISA_DMA) ? RX_DMA_ENBL : 0;
	else
		return 0;
}

static int
dma_busctl(struct net_device *dev)
{
	int retval = 0;
	struct net_local *lp = netdev_priv(dev);

	if (lp->use_dma) {
		if (lp->isa_config & ANY_ISA_DMA)
			retval |= RESET_RX_DMA;	/* Reset the DMA pointer */
		if (lp->isa_config & DMA_BURST)
			retval |= DMA_BURST_MODE;	/* Does ISA config specify DMA burst ? */
		if (lp->dmasize == 64)
			retval |= RX_DMA_SIZE_64K;	/* did they ask for 64K? */
		retval |= MEMORY_ON;	/* we need memory enabled to use DMA. */
	}
	return retval;
}

static void
dma_rx(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	struct sk_buff *skb;
	int status, length;
	unsigned char *bp = lp->rx_dma_ptr;

	status = bp[0] + (bp[1] << 8);
	length = bp[2] + (bp[3] << 8);
	bp += 4;

	cs89_dbg(5, debug, "%s: receiving DMA packet at %lx, status %x, length %x\n",
		 dev->name, (unsigned long)bp, status, length);

	if ((status & RX_OK) == 0) {
		count_rx_errors(status, dev);
		goto skip_this_frame;
	}

	/* Malloc up new buffer. */
	skb = netdev_alloc_skb(dev, length + 2);
	if (skb == NULL) {
		dev->stats.rx_dropped++;

		/* AKPM: advance bp to the next frame */
skip_this_frame:
		bp += (length + 3) & ~3;
		if (bp >= lp->end_dma_buff)
			bp -= lp->dmasize * 1024;
		lp->rx_dma_ptr = bp;
		return;
	}
	skb_reserve(skb, 2);	/* longword align L3 header */

	if (bp + length > lp->end_dma_buff) {
		int semi_cnt = lp->end_dma_buff - bp;

		skb_put_data(skb, bp, semi_cnt);
		skb_put_data(skb, lp->dma_buff, length - semi_cnt);
	} else {
		skb_put_data(skb, bp, length);
	}
	bp += (length + 3) & ~3;
	if (bp >= lp->end_dma_buff)
		bp -= lp->dmasize * 1024;
	lp->rx_dma_ptr = bp;

	cs89_dbg(3, info, "%s: received %d byte DMA packet of type %x\n",
		 dev->name, length,
		 ((skb->data[ETH_ALEN + ETH_ALEN] << 8) |
		  skb->data[ETH_ALEN + ETH_ALEN + 1]));

	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += length;
}
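
/*
 * Wrap-around sketch for the receive copy above (numbers are only
 * illustrative): with a 16 KiB ring (lp->dmasize == 16) and bp sitting
 * 60 bytes before end_dma_buff, a 100-byte frame is copied in two runs,
 * semi_cnt == 60 bytes from bp and the remaining 40 bytes from the
 * start of lp->dma_buff; the read pointer then advances by
 * (length + 3) & ~3 and is folded back into the ring by subtracting
 * lp->dmasize * 1024.
 */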

static void release_dma_buff(struct net_local *lp)
{
	if (lp->dma_buff) {
		free_pages((unsigned long)(lp->dma_buff),
			   get_order(lp->dmasize * 1024));
		lp->dma_buff = NULL;
	}
}

#endif	/* ALLOW_DMA */

static void
control_dc_dc(struct net_device *dev, int on_not_off)
{
	struct net_local *lp = netdev_priv(dev);
	unsigned int selfcontrol;
	unsigned long timenow = jiffies;
	/* control the DC to DC converter in the SelfControl register.
	 * Note: This is hooked up to a general purpose pin, so it might
	 * not always be a DC to DC converter.
	 */

	selfcontrol = HCB1_ENBL;	/* Enable the HCB1 bit as an output */
	if (((lp->adapter_cnf & A_CNF_DC_DC_POLARITY) != 0) ^ on_not_off)
		selfcontrol |= HCB1;
	else
		selfcontrol &= ~HCB1;
	writereg(dev, PP_SelfCTL, selfcontrol);

	/* Wait for the DC/DC converter to power up - 500ms */
	while (time_before(jiffies, timenow + HZ))
		;
}
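
/*
 * Polarity sketch for the XOR above: with A_CNF_DC_DC_POLARITY clear in
 * the adapter configuration, switching the converter on (on_not_off == 1)
 * drives HCB1 high and switching it off drives it low; setting the
 * polarity bit in the EEPROM inverts that mapping.
 */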

/* send a test packet - return true if carrier bits are ok */
static int
send_test_pkt(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	char test_packet[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 46,		/* A 46 in network order */
		0, 0,		/* DSAP=0 & SSAP=0 fields */
		0xf3, 0		/* Control (Test Req + P bit set) */
	};
	unsigned long timenow = jiffies;

	writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_TX_ON);

	memcpy(test_packet, dev->dev_addr, ETH_ALEN);
	memcpy(test_packet + ETH_ALEN, dev->dev_addr, ETH_ALEN);

	iowrite16(TX_AFTER_ALL, lp->virt_addr + TX_CMD_PORT);
	iowrite16(ETH_ZLEN, lp->virt_addr + TX_LEN_PORT);

	/* Test to see if the chip has allocated memory for the packet */
	while (time_before(jiffies, timenow + 5))
		if (readreg(dev, PP_BusST) & READY_FOR_TX_NOW)
			break;
	if (time_after_eq(jiffies, timenow + 5))
		return 0;	/* this shouldn't happen */

	/* Write the contents of the packet */
	writewords(lp, TX_FRAME_PORT, test_packet, (ETH_ZLEN + 1) >> 1);

	cs89_dbg(1, debug, "Sending test packet ");
	/* wait a couple of jiffies for packet to be received */
	for (timenow = jiffies; time_before(jiffies, timenow + 3);)
		;
	if ((readreg(dev, PP_TxEvent) & TX_SEND_OK_BITS) == TX_OK) {
		cs89_dbg(1, cont, "succeeded\n");
		return 1;
	}
	cs89_dbg(1, cont, "failed\n");
	return 0;
}
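
/*
 * The frame assembled above is a minimal 802.2 LLC TEST frame: both the
 * destination and source fields are overwritten with the interface's
 * own MAC address, the length field is 46, DSAP and SSAP are zero, and
 * the 0xf3 control byte is the TEST command with the poll bit set.
 * Note that (ETH_ZLEN + 1) >> 1 words (60 bytes) are written out while
 * test_packet itself is only 18 bytes long, so the remainder of the
 * minimum-length frame is whatever happens to follow on the stack.
 */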

#define DETECTED_NONE  0
#define DETECTED_RJ45H 1
#define DETECTED_RJ45F 2
#define DETECTED_AUI   3
#define DETECTED_BNC   4

static int
detect_tp(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	unsigned long timenow = jiffies;
	int fdx;

	cs89_dbg(1, debug, "%s: Attempting TP\n", dev->name);

	/* If connected to another full duplex capable 10-Base-T card
	 * the link pulses seem to be lost when the auto detect bit in
	 * the LineCTL is set. To overcome this the auto detect bit will
	 * be cleared whilst testing the 10-Base-T interface. This would
	 * not be necessary for the sparrow chip but it is simpler to do
	 * it anyway.
	 */
	writereg(dev, PP_LineCTL, lp->linectl & ~AUI_ONLY);
	control_dc_dc(dev, 0);

	/* Delay for the hardware to work out if the TP cable is present
	 * - 150ms
	 */
	for (timenow = jiffies; time_before(jiffies, timenow + 15);)
		;
	if ((readreg(dev, PP_LineST) & LINK_OK) == 0)
		return DETECTED_NONE;

	if (lp->chip_type == CS8900) {
		switch (lp->force & 0xf0) {
#if 0
		case FORCE_AUTO:
			pr_info("%s: cs8900 doesn't autonegotiate\n",
				dev->name);
			return DETECTED_NONE;
#endif
		/* CS8900 doesn't support AUTO, change to HALF */
		case FORCE_AUTO:
			lp->force &= ~FORCE_AUTO;
			lp->force |= FORCE_HALF;
			break;
		case FORCE_HALF:
			break;
		case FORCE_FULL:
			writereg(dev, PP_TestCTL,
				 readreg(dev, PP_TestCTL) | FDX_8900);
			break;
		}
		fdx = readreg(dev, PP_TestCTL) & FDX_8900;
	} else {
		switch (lp->force & 0xf0) {
		case FORCE_AUTO:
			lp->auto_neg_cnf = AUTO_NEG_ENABLE;
			break;
		case FORCE_HALF:
			lp->auto_neg_cnf = 0;
			break;
		case FORCE_FULL:
			lp->auto_neg_cnf = RE_NEG_NOW | ALLOW_FDX;
			break;
		}

		writereg(dev, PP_AutoNegCTL, lp->auto_neg_cnf & AUTO_NEG_MASK);

		if ((lp->auto_neg_cnf & AUTO_NEG_BITS) == AUTO_NEG_ENABLE) {
			pr_info("%s: negotiating duplex...\n", dev->name);
			while (readreg(dev, PP_AutoNegST) & AUTO_NEG_BUSY) {
				if (time_after(jiffies, timenow + 4000)) {
					pr_err("**** Full / half duplex auto-negotiation timed out ****\n");
					break;
				}
			}
		}
		fdx = readreg(dev, PP_AutoNegST) & FDX_ACTIVE;
	}
	if (fdx)
		return DETECTED_RJ45F;
	else
		return DETECTED_RJ45H;
}

static int
detect_bnc(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);

	cs89_dbg(1, debug, "%s: Attempting BNC\n", dev->name);
	control_dc_dc(dev, 1);

	writereg(dev, PP_LineCTL, (lp->linectl & ~AUTO_AUI_10BASET) | AUI_ONLY);

	if (send_test_pkt(dev))
		return DETECTED_BNC;
	else
		return DETECTED_NONE;
}

static int
detect_aui(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);

	cs89_dbg(1, debug, "%s: Attempting AUI\n", dev->name);
	control_dc_dc(dev, 0);

	writereg(dev, PP_LineCTL, (lp->linectl & ~AUTO_AUI_10BASET) | AUI_ONLY);

	if (send_test_pkt(dev))
		return DETECTED_AUI;
	else
		return DETECTED_NONE;
}

/* We have a good packet(s), get it/them out of the buffers. */
static void
net_rx(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	struct sk_buff *skb;
	int status, length;

	status = ioread16(lp->virt_addr + RX_FRAME_PORT);
	length = ioread16(lp->virt_addr + RX_FRAME_PORT);

	if ((status & RX_OK) == 0) {
		count_rx_errors(status, dev);
		return;
	}

	/* Malloc up new buffer. */
	skb = netdev_alloc_skb(dev, length + 2);
	if (skb == NULL) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_reserve(skb, 2);	/* longword align L3 header */

	readwords(lp, RX_FRAME_PORT, skb_put(skb, length), length >> 1);
	if (length & 1)
		skb->data[length - 1] = ioread16(lp->virt_addr + RX_FRAME_PORT);

	cs89_dbg(3, debug, "%s: received %d byte packet of type %x\n",
		 dev->name, length,
		 (skb->data[ETH_ALEN + ETH_ALEN] << 8) |
		 skb->data[ETH_ALEN + ETH_ALEN + 1]);

	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += length;
}

/* The typical workload of the driver:
 * Handle the network interface interrupts.
 */

static irqreturn_t net_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_local *lp;
	int status;
	int handled = 0;

	lp = netdev_priv(dev);

	/* we MUST read all the events out of the ISQ, otherwise we'll never
	 * get interrupted again. As a consequence, we can't have any limit
	 * on the number of times we loop in the interrupt handler. The
	 * hardware guarantees that eventually we'll run out of events. Of
	 * course, if you're on a slow machine, and packets are arriving
	 * faster than you can read them off, you're screwed. Hasta la
	 * vista, baby!
	 */
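	/* Each ISQ_PORT read below pops one 16-bit event: the bits covered
	 * by ISQ_EVENT_MASK identify the source register, and for the
	 * RxMISS and TxCOL events the remaining high bits (hence the
	 * "status >> 6" further down) carry the chip's counter snapshot.
	 * A status of zero means the queue is empty and ends the loop.
	 */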
	while ((status = ioread16(lp->virt_addr + ISQ_PORT))) {
		cs89_dbg(4, debug, "%s: event=%04x\n", dev->name, status);
		handled = 1;
		switch (status & ISQ_EVENT_MASK) {
		case ISQ_RECEIVER_EVENT:
			/* Got a packet(s). */
			net_rx(dev);
			break;
		case ISQ_TRANSMITTER_EVENT:
			dev->stats.tx_packets++;
			netif_wake_queue(dev);	/* Inform upper layers. */
			if ((status & (TX_OK |
				       TX_LOST_CRS |
				       TX_SQE_ERROR |
				       TX_LATE_COL |
				       TX_16_COL)) != TX_OK) {
				if ((status & TX_OK) == 0)
					dev->stats.tx_errors++;
				if (status & TX_LOST_CRS)
					dev->stats.tx_carrier_errors++;
				if (status & TX_SQE_ERROR)
					dev->stats.tx_heartbeat_errors++;
				if (status & TX_LATE_COL)
					dev->stats.tx_window_errors++;
				if (status & TX_16_COL)
					dev->stats.tx_aborted_errors++;
			}
			break;
		case ISQ_BUFFER_EVENT:
			if (status & READY_FOR_TX) {
				/* we tried to transmit a packet earlier,
				 * but inexplicably ran out of buffers.
				 * That shouldn't happen since we only ever
				 * load one packet. Shrug. Do the right
				 * thing anyway.
				 */
				netif_wake_queue(dev);	/* Inform upper layers. */
			}
			if (status & TX_UNDERRUN) {
				cs89_dbg(0, err, "%s: transmit underrun\n",
					 dev->name);
				lp->send_underrun++;
				if (lp->send_underrun == 3)
					lp->send_cmd = TX_AFTER_381;
				else if (lp->send_underrun == 6)
					lp->send_cmd = TX_AFTER_ALL;
				/* transmit cycle is done, although
				 * frame wasn't transmitted - this
				 * avoids having to wait for the upper
				 * layers to timeout on us, in the
				 * event of a tx underrun
				 */
				netif_wake_queue(dev);	/* Inform upper layers. */
			}
#if ALLOW_DMA
			if (lp->use_dma && (status & RX_DMA)) {
				int count = readreg(dev, PP_DmaFrameCnt);

				while (count) {
					cs89_dbg(5, debug,
						 "%s: receiving %d DMA frames\n",
						 dev->name, count);
					if (count > 1)
						cs89_dbg(2, debug,
							 "%s: receiving %d DMA frames\n",
							 dev->name, count);
					dma_rx(dev);
					if (--count == 0)
						count = readreg(dev, PP_DmaFrameCnt);
					if (count > 0)
						cs89_dbg(2, debug,
							 "%s: continuing with %d DMA frames\n",
							 dev->name, count);
				}
			}
#endif
			break;
		case ISQ_RX_MISS_EVENT:
			dev->stats.rx_missed_errors += (status >> 6);
			break;
		case ISQ_TX_COL_EVENT:
			dev->stats.collisions += (status >> 6);
			break;
		}
	}
	return IRQ_RETVAL(handled);
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) /* Open/initialize the board. This is called (in the current kernel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) sometime after booting when the 'ifconfig' program is run.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) This routine should set everything up anew at each open, even
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) registers that "should" only need to be set once at boot, so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)    there is a non-reboot way to recover if something goes wrong.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) /* AKPM: do we need to do any locking here? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) net_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) struct net_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (dev->irq < 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) /* Allow interrupts to be generated by the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) /* Cirrus' release had this: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL) | ENABLE_IRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) /* And 2.3.47 had this: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
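		/* No IRQ was specified: scan the allowable IRQs in irq_map
		 * (starting at 2) and keep the first one request_irq() grants.
		 */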
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) for (i = 2; i < CS8920_NO_INTS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if ((1 << i) & lp->irq_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (request_irq(i, net_interrupt, 0, dev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) dev) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) dev->irq = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) write_irq(dev, lp->chip_type, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) /* writereg(dev, PP_BufCFG, GENERATE_SW_INTERRUPT); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (i >= CS8920_NO_INTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) writereg(dev, PP_BusCTL, 0); /* disable interrupts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) pr_err("can't get an interrupt\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) goto bad_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) #if !defined(CONFIG_CS89x0_PLATFORM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (((1 << dev->irq) & lp->irq_map) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) pr_err("%s: IRQ %d is not in our map of allowable IRQs, which is %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) dev->name, dev->irq, lp->irq_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) goto bad_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) /* FIXME: Cirrus' release had this: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL)|ENABLE_IRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /* And 2.3.47 had this: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) write_irq(dev, lp->chip_type, dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) ret = request_irq(dev->irq, net_interrupt, 0, dev->name, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) pr_err("request_irq(%d) failed\n", dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) goto bad_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) #if ALLOW_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (lp->use_dma && (lp->isa_config & ANY_ISA_DMA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) lp->dma_buff = (unsigned char *)__get_dma_pages(GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) get_order(lp->dmasize * 1024));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) if (!lp->dma_buff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) pr_err("%s: cannot get %dK memory for DMA\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) dev->name, lp->dmasize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) goto release_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) cs89_dbg(1, debug, "%s: dma %lx %lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) dev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) (unsigned long)lp->dma_buff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) (unsigned long)isa_virt_to_bus(lp->dma_buff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if ((unsigned long)lp->dma_buff >= MAX_DMA_ADDRESS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) !dma_page_eq(lp->dma_buff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) lp->dma_buff + lp->dmasize * 1024 - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) pr_err("%s: not usable as DMA buffer\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) goto release_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) memset(lp->dma_buff, 0, lp->dmasize * 1024); /* Why? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (request_dma(dev->dma, dev->name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) pr_err("%s: cannot get dma channel %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) dev->name, dev->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) goto release_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) write_dma(dev, lp->chip_type, dev->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) lp->rx_dma_ptr = lp->dma_buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) lp->end_dma_buff = lp->dma_buff + lp->dmasize * 1024;
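		/* Program the ISA DMA channel for auto-init receive: the
		 * controller cycles over the whole dma_buff, while rx_dma_ptr
		 * marks where the driver will read next.
		 */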
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) disable_dma(dev->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) clear_dma_ff(dev->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) set_dma_mode(dev->dma, DMA_RX_MODE); /* auto_init as well */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) set_dma_addr(dev->dma, isa_virt_to_bus(lp->dma_buff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) set_dma_count(dev->dma, lp->dmasize * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) enable_dma(dev->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) #endif /* ALLOW_DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) /* set the Ethernet address */
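	/* the individual address registers are 16 bits wide, so the 6-byte
	 * MAC address is written as three little-endian words
	 */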
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) for (i = 0; i < ETH_ALEN / 2; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) writereg(dev, PP_IA + i * 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) (dev->dev_addr[i * 2] |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) (dev->dev_addr[i * 2 + 1] << 8)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) /* while we're testing the interface, leave interrupts disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) writereg(dev, PP_BusCTL, MEMORY_ON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) /* Set the LineCTL quintuplet based on adapter configuration read from EEPROM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if ((lp->adapter_cnf & A_CNF_EXTND_10B_2) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) (lp->adapter_cnf & A_CNF_LOW_RX_SQUELCH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) lp->linectl = LOW_RX_SQUELCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) lp->linectl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) /* check to make sure that they have the "right" hardware available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) switch (lp->adapter_cnf & A_CNF_MEDIA_TYPE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) case A_CNF_MEDIA_10B_T:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) result = lp->adapter_cnf & A_CNF_10B_T;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) case A_CNF_MEDIA_AUI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) result = lp->adapter_cnf & A_CNF_AUI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) case A_CNF_MEDIA_10B_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) result = lp->adapter_cnf & A_CNF_10B_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) result = lp->adapter_cnf & (A_CNF_10B_T |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) A_CNF_AUI |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) A_CNF_10B_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (!result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) pr_err("%s: EEPROM is configured for unavailable media\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) release_dma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) #if ALLOW_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) free_dma(dev->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) release_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) release_dma_buff(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) writereg(dev, PP_LineCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) readreg(dev, PP_LineCTL) & ~(SERIAL_TX_ON | SERIAL_RX_ON));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) goto bad_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) /* set the hardware to the configured choice */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) switch (lp->adapter_cnf & A_CNF_MEDIA_TYPE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) case A_CNF_MEDIA_10B_T:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) result = detect_tp(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (result == DETECTED_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) pr_warn("%s: 10Base-T (RJ-45) has no cable\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) result = DETECTED_RJ45H; /* Yes! I don't care if I see a link pulse */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) case A_CNF_MEDIA_AUI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) result = detect_aui(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (result == DETECTED_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) pr_warn("%s: 10Base-5 (AUI) has no cable\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) 				result = DETECTED_AUI; /* Yes! I don't care if I see a carrier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) case A_CNF_MEDIA_10B_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) result = detect_bnc(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (result == DETECTED_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) pr_warn("%s: 10Base-2 (BNC) has no cable\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) result = DETECTED_BNC; /* Yes! I don't care if I can xmit a packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) case A_CNF_MEDIA_AUTO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) writereg(dev, PP_LineCTL, lp->linectl | AUTO_AUI_10BASET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (lp->adapter_cnf & A_CNF_10B_T) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) result = detect_tp(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if (result != DETECTED_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (lp->adapter_cnf & A_CNF_AUI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) result = detect_aui(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (result != DETECTED_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (lp->adapter_cnf & A_CNF_10B_2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) result = detect_bnc(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (result != DETECTED_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) pr_err("%s: no media detected\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) goto release_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) switch (result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) case DETECTED_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) pr_err("%s: no network cable attached to configured media\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) goto release_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) case DETECTED_RJ45H:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) pr_info("%s: using half-duplex 10Base-T (RJ-45)\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) case DETECTED_RJ45F:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) pr_info("%s: using full-duplex 10Base-T (RJ-45)\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) case DETECTED_AUI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) pr_info("%s: using 10Base-5 (AUI)\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) case DETECTED_BNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) pr_info("%s: using 10Base-2 (BNC)\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) /* Turn on both receive and transmit operations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) writereg(dev, PP_LineCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) readreg(dev, PP_LineCTL) | SERIAL_RX_ON | SERIAL_TX_ON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) /* Receive only error free packets addressed to this card */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) lp->rx_mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) writereg(dev, PP_RxCTL, DEF_RX_ACCEPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) lp->curr_rx_cfg = RX_OK_ENBL | RX_CRC_ERROR_ENBL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (lp->isa_config & STREAM_TRANSFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) lp->curr_rx_cfg |= RX_STREAM_ENBL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) #if ALLOW_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) set_dma_cfg(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) writereg(dev, PP_RxCFG, lp->curr_rx_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) writereg(dev, PP_TxCFG, (TX_LOST_CRS_ENBL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) TX_SQE_ERROR_ENBL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) TX_OK_ENBL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) TX_LATE_COL_ENBL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) TX_JBR_ENBL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) TX_ANY_COL_ENBL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) TX_16_COL_ENBL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) writereg(dev, PP_BufCFG, (READY_FOR_TX_ENBL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) RX_MISS_COUNT_OVRFLOW_ENBL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) #if ALLOW_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) dma_bufcfg(dev) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) TX_COL_COUNT_OVRFLOW_ENBL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) TX_UNDERRUN_ENBL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) /* now that we've got our act together, enable everything */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) writereg(dev, PP_BusCTL, (ENABLE_IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) | (dev->mem_start ? MEMORY_ON : 0) /* turn memory on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) #if ALLOW_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) | dma_busctl(dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) cs89_dbg(1, debug, "net_open() succeeded\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) bad_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) /* The inverse routine to net_open(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) net_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) #if ALLOW_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) struct net_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) writereg(dev, PP_RxCFG, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) writereg(dev, PP_TxCFG, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) writereg(dev, PP_BufCFG, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) writereg(dev, PP_BusCTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) #if ALLOW_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) if (lp->use_dma && lp->dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) free_dma(dev->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) release_dma_buff(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) /* Update the statistics here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) /* Get the current statistics.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) * This may be called with the card open or closed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) static struct net_device_stats *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) net_get_stats(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) struct net_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) /* Update the statistics from the device registers. */
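	/* the counters occupy the bits above the low six, hence the shift
	 * by 6 (same layout as the ISQ counter events in the interrupt
	 * handler)
	 */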
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) dev->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) dev->stats.collisions += (readreg(dev, PP_TxCol) >> 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) return &dev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) static void net_timeout(struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) /* If we get here, some higher level has decided we are broken.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) There should really be a "kick me" function call instead. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) cs89_dbg(0, err, "%s: transmit timed out, %s?\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) dev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) tx_done(dev) ? "IRQ conflict" : "network cable problem");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) /* Try to restart the adaptor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) struct net_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) cs89_dbg(3, debug, "%s: sent %d byte packet of type %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) dev->name, skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) ((skb->data[ETH_ALEN + ETH_ALEN] << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) skb->data[ETH_ALEN + ETH_ALEN + 1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) /* keep the upload from being interrupted, since we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * ask the chip to start transmitting before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) * whole packet has been completely uploaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) /* initiate a transmit sequence */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) iowrite16(lp->send_cmd, lp->virt_addr + TX_CMD_PORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) iowrite16(skb->len, lp->virt_addr + TX_LEN_PORT);
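	/* writing the command and length asks the chip to reserve transmit
	 * buffer space; BusST's READY_FOR_TX_NOW below reports whether the
	 * bid succeeded
	 */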
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) /* Test to see if the chip has allocated memory for the packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if ((readreg(dev, PP_BusST) & READY_FOR_TX_NOW) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		/* Gasp! It hasn't. But that shouldn't happen since we're waiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		 * for TxOk, so return NETDEV_TX_BUSY and let the stack requeue this packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) cs89_dbg(0, err, "Tx buffer not free!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) /* Write the contents of the packet */
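	/* the frame is copied 16 bits at a time; odd lengths are rounded up
	 * to a whole word
	 */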
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) writewords(lp, TX_FRAME_PORT, skb->data, (skb->len + 1) >> 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) dev->stats.tx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) dev_consume_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) /* We DO NOT call netif_wake_queue() here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * We also DO NOT call netif_start_queue().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) * Either of these would cause another bottom half run through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * net_send_packet() before this packet has fully gone out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	 * That causes us to hit the "Gasp!" above and the send is rescheduled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	 * it runs like a dog. We just return and wait for the Tx completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	 * interrupt handler to restart the netdevice layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) static void set_multicast_list(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) struct net_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) u16 cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (dev->flags & IFF_PROMISC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) lp->rx_mode = RX_ALL_ACCEPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) /* The multicast-accept list is initialized to accept-all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * and we rely on higher-level filtering for now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) lp->rx_mode = RX_MULTCAST_ACCEPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) lp->rx_mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) writereg(dev, PP_RxCTL, DEF_RX_ACCEPT | lp->rx_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) /* in promiscuous mode, we accept errored packets,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * so we have to enable interrupts on them also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) cfg = lp->curr_rx_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (lp->rx_mode == RX_ALL_ACCEPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) cfg |= RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) writereg(dev, PP_RxCFG, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static int set_mac_address(struct net_device *dev, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) struct sockaddr *addr = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) cs89_dbg(0, debug, "%s: Setting MAC address to %pM\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) dev->name, dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) /* set the Ethernet address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) for (i = 0; i < ETH_ALEN / 2; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) writereg(dev, PP_IA + i * 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) (dev->dev_addr[i * 2] |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) (dev->dev_addr[i * 2 + 1] << 8)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) * Polling receive - used by netconsole and other diagnostic tools
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * to allow network i/o with interrupts disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) static void net_poll_controller(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) disable_irq(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) net_interrupt(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) enable_irq(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) static const struct net_device_ops net_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) .ndo_open = net_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) .ndo_stop = net_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) .ndo_tx_timeout = net_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) .ndo_start_xmit = net_send_packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) .ndo_get_stats = net_get_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) .ndo_set_rx_mode = set_multicast_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) .ndo_set_mac_address = set_mac_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) .ndo_poll_controller = net_poll_controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) .ndo_validate_addr = eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) static void __init reset_chip(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) #if !defined(CONFIG_MACH_MX31ADS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) struct net_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) unsigned long reset_start_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) /* wait 30 ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) msleep(30);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) if (lp->chip_type != CS8900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) /* Hardware problem requires PNP registers to be reconfigured after a reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) iowrite16(PP_CS8920_ISAINT, lp->virt_addr + ADD_PORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) iowrite8(dev->irq, lp->virt_addr + DATA_PORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) iowrite8(0, lp->virt_addr + DATA_PORT + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) iowrite16(PP_CS8920_ISAMemB, lp->virt_addr + ADD_PORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) iowrite8((dev->mem_start >> 16) & 0xff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) lp->virt_addr + DATA_PORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) iowrite8((dev->mem_start >> 8) & 0xff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) lp->virt_addr + DATA_PORT + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) /* Wait until the chip is reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) reset_start_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) while ((readreg(dev, PP_SelfST) & INIT_DONE) == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) time_before(jiffies, reset_start_time + 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) #endif /* !CONFIG_MACH_MX31ADS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) /* This is the real probe routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) * Linux has a history of friendly device probes on the ISA bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)  * A good device probe avoids doing writes, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) * verifies that the correct device exists and functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) * Return 0 on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) static int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) struct net_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) int tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) unsigned rev_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) int eeprom_buff[CHKSUM_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) /* Initialize the device structure. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if (!modular) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) memset(lp, 0, sizeof(*lp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) spin_lock_init(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) #ifndef MODULE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) #if ALLOW_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) if (g_cs89x0_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) lp->use_dma = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) lp->dma = g_cs89x0_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) lp->dmasize = 16; /* Could make this an option... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) lp->force = g_cs89x0_media__force;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) pr_debug("PP_addr at %p[%x]: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) ioaddr, ADD_PORT, ioread16(ioaddr + ADD_PORT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) iowrite16(PP_ChipID, ioaddr + ADD_PORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) tmp = ioread16(ioaddr + DATA_PORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if (tmp != CHIP_EISA_ID_SIG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) pr_debug("%s: incorrect signature at %p[%x]: 0x%x!="
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) CHIP_EISA_ID_SIG_STR "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) dev->name, ioaddr, DATA_PORT, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) retval = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) goto out1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) lp->virt_addr = ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) /* get the chip type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) rev_type = readreg(dev, PRODUCT_ID_ADD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) lp->chip_type = rev_type & ~REVISON_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) lp->chip_revision = ((rev_type & REVISON_BITS) >> 8) + 'A';
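	/* the revision is the REVISON_BITS field of the product ID word,
	 * mapped to a letter starting at 'A'
	 */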
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) /* Check the chip type and revision in order to set the correct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) * send command. CS8920 revision C and CS8900 revision F can use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) * the faster send.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) lp->send_cmd = TX_AFTER_381;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) if (lp->chip_type == CS8900 && lp->chip_revision >= 'F')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) lp->send_cmd = TX_NOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (lp->chip_type != CS8900 && lp->chip_revision >= 'C')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) lp->send_cmd = TX_NOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) pr_info_once("%s\n", version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) pr_info("%s: cs89%c0%s rev %c found at %p ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) dev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) lp->chip_type == CS8900 ? '0' : '2',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) lp->chip_type == CS8920M ? "M" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) lp->chip_revision,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) lp->virt_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) reset_chip(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) /* Here we read the current configuration of the chip.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	 * If there is no Extended EEPROM, the idea is not to disturb the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	 * chip configuration; it should have been set up correctly by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	 * automatic EEPROM read on reset. So, if the chip says it read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	 * the EEPROM, the driver will always do *something* instead of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	 * complaining that adapter_cnf is 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) (EEPROM_OK | EEPROM_PRESENT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) /* Load the MAC. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) for (i = 0; i < ETH_ALEN / 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) unsigned int Addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) Addr = readreg(dev, PP_IA + i * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) dev->dev_addr[i * 2] = Addr & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) dev->dev_addr[i * 2 + 1] = Addr >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) /* Load the Adapter Configuration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 		 * Note: Barring more specific information from some other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 		 * source (i.e. EEPROM + schematics), we would not know how
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 		 * to operate a 10Base2 interface on the AUI port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		 * However, since we do read the status of HCB1 and use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		 * settings that always result in calls to control_dc_dc(dev, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		 * a BNC interface should work if the enable pin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		 * (dc/dc converter) is on HCB1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 		 * It will be reported as AUI, however.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) lp->adapter_cnf = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) i = readreg(dev, PP_LineCTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) /* Preserve the setting of the HCB1 pin. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if ((i & (HCB1 | HCB1_ENBL)) == (HCB1 | HCB1_ENBL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) lp->adapter_cnf |= A_CNF_DC_DC_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 		/* Save the squelch bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) if ((i & LOW_RX_SQUELCH) == LOW_RX_SQUELCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) lp->adapter_cnf |= A_CNF_EXTND_10B_2 | A_CNF_LOW_RX_SQUELCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 		/* Check if the card is in 10Base-T only mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) lp->adapter_cnf |= A_CNF_10B_T | A_CNF_MEDIA_10B_T;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) /* Check if the card is in AUI only mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUI_ONLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) lp->adapter_cnf |= A_CNF_AUI | A_CNF_MEDIA_AUI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) /* Check if the card is in Auto mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUTO_AUI_10BASET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) lp->adapter_cnf |= A_CNF_AUI | A_CNF_10B_T |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) A_CNF_MEDIA_AUI | A_CNF_MEDIA_10B_T | A_CNF_MEDIA_AUTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) cs89_dbg(1, info, "%s: PP_LineCTL=0x%x, adapter_cnf=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) dev->name, i, lp->adapter_cnf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		/* IRQ: for the CS8900, read the configured ISA interrupt here; other chip types are probed below. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (lp->chip_type == CS8900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) lp->isa_config = readreg(dev, PP_CS8900_ISAINT) & INT_NO_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) pr_cont("[Cirrus EEPROM] ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) /* First check to see if an EEPROM is attached. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) if ((readreg(dev, PP_SelfST) & EEPROM_PRESENT) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) pr_warn("No EEPROM, relying on command line....\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) else if (get_eeprom_data(dev, START_EEPROM_DATA, CHKSUM_LEN, eeprom_buff) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) pr_warn("EEPROM read failed, relying on command line\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) } else if (get_eeprom_cksum(START_EEPROM_DATA, CHKSUM_LEN, eeprom_buff) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		/* Check if the chip was able to read its own configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 		 * starting at 0 in the EEPROM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) (EEPROM_OK | EEPROM_PRESENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) pr_warn("Extended EEPROM checksum bad and no Cirrus EEPROM, relying on command line\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) /* This reads an extended EEPROM that is not documented
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) * in the CS8900 datasheet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) /* get transmission control word but keep the autonegotiation bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if (!lp->auto_neg_cnf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) lp->auto_neg_cnf = eeprom_buff[AUTO_NEG_CNF_OFFSET / 2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) /* Store adapter configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) if (!lp->adapter_cnf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) lp->adapter_cnf = eeprom_buff[ADAPTER_CNF_OFFSET / 2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) /* Store ISA configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) lp->isa_config = eeprom_buff[ISA_CNF_OFFSET / 2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) dev->mem_start = eeprom_buff[PACKET_PAGE_OFFSET / 2] << 8;
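		/* the EEPROM word holds the memory base shifted right by 8,
		 * so shift it back to recover the full address
		 */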
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		/* eeprom_buff holds 16-bit EEPROM words in ints, so we can't just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		 * memcpy it; unpack the station (MAC) address a word at a time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) for (i = 0; i < ETH_ALEN / 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) dev->dev_addr[i * 2] = eeprom_buff[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) dev->dev_addr[i * 2 + 1] = eeprom_buff[i] >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) cs89_dbg(1, debug, "%s: new adapter_cnf: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) dev->name, lp->adapter_cnf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) /* allow them to force multiple transceivers. If they force multiple, autosense */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) if (lp->force & FORCE_RJ45) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) lp->adapter_cnf |= A_CNF_10B_T;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) if (lp->force & FORCE_AUI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) lp->adapter_cnf |= A_CNF_AUI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (lp->force & FORCE_BNC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) lp->adapter_cnf |= A_CNF_10B_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (count > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) lp->adapter_cnf |= A_CNF_MEDIA_AUTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) else if (lp->force & FORCE_RJ45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) lp->adapter_cnf |= A_CNF_MEDIA_10B_T;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) else if (lp->force & FORCE_AUI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) lp->adapter_cnf |= A_CNF_MEDIA_AUI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) else if (lp->force & FORCE_BNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) lp->adapter_cnf |= A_CNF_MEDIA_10B_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) cs89_dbg(1, debug, "%s: after force 0x%x, adapter_cnf=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) dev->name, lp->force, lp->adapter_cnf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) /* FIXME: We don't let you set dc-dc polarity or low RX squelch from the command line: add it here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) /* FIXME: We don't let you set the IMM bit from the command line: add it to lp->auto_neg_cnf here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) /* FIXME: we don't set the Ethernet address on the command line. Use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * ifconfig IFACE hw ether AABBCCDDEEFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) pr_info("media %s%s%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) (lp->adapter_cnf & A_CNF_10B_T) ? "RJ-45," : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) (lp->adapter_cnf & A_CNF_AUI) ? "AUI," : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) (lp->adapter_cnf & A_CNF_10B_2) ? "BNC," : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) lp->irq_map = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	/* Only the CS8920 has a PnP soft-configured ISA interrupt; the CS8900 does not */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if (lp->chip_type != CS8900 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) /* Check if the ISA IRQ has been set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) (i = readreg(dev, PP_CS8920_ISAINT) & 0xff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) (i != 0 && i < CS8920_NO_INTS))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) if (!dev->irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) dev->irq = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) i = lp->isa_config & INT_NO_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) #ifndef CONFIG_CS89x0_PLATFORM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if (lp->chip_type == CS8900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) /* Translate the IRQ using the IRQ mapping table. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (i >= ARRAY_SIZE(cs8900_irq_map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) pr_err("invalid ISA interrupt number %d\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) i = cs8900_irq_map[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) lp->irq_map = CS8900_IRQ_MAP; /* fixed IRQ map for CS8900 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) int irq_map_buff[IRQ_MAP_LEN/2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) if (get_eeprom_data(dev, IRQ_MAP_EEPROM_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) IRQ_MAP_LEN / 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) irq_map_buff) >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) if ((irq_map_buff[0] & 0xff) == PNP_IRQ_FRMT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) lp->irq_map = ((irq_map_buff[0] >> 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) (irq_map_buff[1] << 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) if (!dev->irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) dev->irq = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) pr_cont(" IRQ %d", dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) #if ALLOW_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) if (lp->use_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) get_dma_channel(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) pr_cont(", DMA %d", dev->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) pr_cont(", programmed I/O");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) /* print the ethernet address. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) pr_cont(", MAC %pM\n", dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) dev->netdev_ops = &net_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) dev->watchdog_timeo = HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) cs89_dbg(0, info, "cs89x0_probe1() successful\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) retval = register_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) out2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) iowrite16(PP_ChipID, lp->virt_addr + ADD_PORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) out1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) #ifndef CONFIG_CS89x0_PLATFORM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) * This function converts the I/O port address used by the cs89x0_probe() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) * init_module() functions to the I/O memory address used by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) * cs89x0_probe1() function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) static int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) cs89x0_ioport_probe(struct net_device *dev, unsigned long ioport, int modular)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) struct net_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) void __iomem *io_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) if (!lp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) dev->base_addr = ioport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) if (!request_region(ioport, NETCARD_IO_EXTENT, DRV_NAME)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) io_mem = ioport_map(ioport & ~3, NETCARD_IO_EXTENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) if (!io_mem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) /* If we were given an odd I/O address, check the ADD_PORT for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)  * EISA signature word, to confirm the chip's register window really
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)  * starts at address zero. An I/O base ending in 0x3 skips the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)  * ADD_PORT test.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if (ioport & 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) cs89_dbg(1, info, "%s: odd ioaddr 0x%lx\n", dev->name, ioport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) if ((ioport & 2) != 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) if ((ioread16(io_mem + ADD_PORT) & ADD_MASK) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) ADD_SIG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) pr_err("%s: bad signature 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) dev->name, ioread16(io_mem + ADD_PORT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) goto unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) ret = cs89x0_probe1(dev, io_mem, modular);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) unmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) ioport_unmap(io_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) release_region(ioport, NETCARD_IO_EXTENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) #ifndef MODULE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) /* Check for a network adaptor of this type, and return the net_device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)  * on success or an ERR_PTR() on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)  * If dev->base_addr == 0, probe all likely locations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)  * If dev->base_addr is nonzero but below 0x200, do not probe at all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)  * and return failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643)  * Otherwise probe only the single location given in dev->base_addr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)  */
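/* In non-modular builds the I/O base and IRQ normally arrive via the legacy
 * "netdev=" boot option picked up by netdev_boot_setup_check() below, e.g.
 * netdev=10,0x300,eth0 (illustrative values only; use whatever your board
 * actually decodes).
 */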
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) struct net_device * __init cs89x0_probe(int unit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) unsigned *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) int io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) return ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) sprintf(dev->name, "eth%d", unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) netdev_boot_setup_check(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) io = dev->base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) irq = dev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) cs89_dbg(0, info, "cs89x0_probe(0x%x)\n", io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) if (io > 0x1ff) { /* Check a single specified location. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) err = cs89x0_ioport_probe(dev, io, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) } else if (io != 0) { /* Don't probe at all. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) err = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) for (port = netcard_portlist; *port; port++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) if (cs89x0_ioport_probe(dev, *port, 0) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) dev->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) if (!*port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) return dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) pr_warn("no cs8900 or cs8920 detected. Be sure to disable PnP with SETUP\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) #if defined(MODULE) && !defined(CONFIG_CS89x0_PLATFORM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) static struct net_device *dev_cs89x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) /* Support the 'debug' module parm even if we're compiled for non-debug to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) * avoid breaking someone's startup scripts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) static int io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) static int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) static int debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) static char media[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) static int duplex = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) static int use_dma; /* These generate unused var warnings if ALLOW_DMA = 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) static int dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) static int dmasize = 16; /* or 64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) module_param_hw(io, int, ioport, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) module_param_hw(irq, int, irq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) module_param(debug, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) module_param_string(media, media, sizeof(media), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) module_param(duplex, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) module_param_hw(dma, int, dma, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) module_param(dmasize, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) module_param(use_dma, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) MODULE_PARM_DESC(io, "cs89x0 I/O base address");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) MODULE_PARM_DESC(irq, "cs89x0 IRQ number");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) #if DEBUGGING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) MODULE_PARM_DESC(debug, "cs89x0 debug level (0-6)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) MODULE_PARM_DESC(debug, "(ignored)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) MODULE_PARM_DESC(media, "Set cs89x0 adapter(s) media type(s) (rj45,bnc,aui)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) /* Only duplex=-1 (autonegotiate) is currently interpreted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) MODULE_PARM_DESC(duplex, "(ignored)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) #if ALLOW_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) MODULE_PARM_DESC(dma, "cs89x0 ISA DMA channel; ignored if use_dma=0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) MODULE_PARM_DESC(dmasize, "cs89x0 DMA size in kB (16,64); ignored if use_dma=0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) MODULE_PARM_DESC(use_dma, "cs89x0 using DMA (0-1)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) MODULE_PARM_DESC(dma, "(ignored)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) MODULE_PARM_DESC(dmasize, "(ignored)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) MODULE_PARM_DESC(use_dma, "(ignored)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) MODULE_AUTHOR("Mike Cruse, Russell Nelson <nelson@crynwr.com>, Andrew Morton");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)  * media=rj45 - specify media type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)  * or media=aui
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)  * or media=bnc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)  * duplex=-1 - autonegotiate duplex (the only value the driver acts on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)  * debug=# - debug level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) * Default Chip Configuration:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) * DMA Burst = enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) * IOCHRDY Enabled = enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) * UseSA = enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) * CS8900 defaults to half-duplex if not specified on command-line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) * CS8920 defaults to autoneg if not specified on command-line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) * Use reset defaults for other config parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) * Assumptions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) * media type specified is supported (circuitry is present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) * if memory address is > 1MB, then required mem decode hw is present
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) * if 10B-2, then agent other than driver will enable DC/DC converter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) * (hw or software util)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) */
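
/* Illustrative modular load (example values only; substitute the resources
 * your board actually decodes):
 *   modprobe cs89x0 io=0x300 irq=10 media=rj45
 * With ALLOW_DMA builds, DMA can additionally be requested with
 *   use_dma=1 dma=5 dmasize=16
 */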
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) int __init init_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) struct net_local *lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) #if DEBUGGING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) net_debug = debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) debug = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) dev->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) dev->base_addr = io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) #if ALLOW_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) if (use_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) lp->use_dma = use_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) lp->dma = dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) lp->dmasize = dmasize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) spin_lock_init(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) /* boy, they'd better get these right */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) if (!strcmp(media, "rj45"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) else if (!strcmp(media, "aui"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) lp->adapter_cnf = A_CNF_MEDIA_AUI | A_CNF_AUI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) else if (!strcmp(media, "bnc"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) lp->adapter_cnf = A_CNF_MEDIA_10B_2 | A_CNF_10B_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) if (duplex == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) lp->auto_neg_cnf = AUTO_NEG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (io == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) pr_err("Module autoprobing not allowed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) pr_err("Append io=0xNNN\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) ret = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) } else if (io <= 0x1ff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) #if ALLOW_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) if (use_dma && dmasize != 16 && dmasize != 64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) pr_err("dma size must be either 16K or 64K, not %dK\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) dmasize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) ret = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) ret = cs89x0_ioport_probe(dev, io, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) dev_cs89x0 = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) void __exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) cleanup_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) struct net_local *lp = netdev_priv(dev_cs89x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) unregister_netdev(dev_cs89x0);
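/* Restore the PacketPage address pointer to the chip ID offset before
 * unmapping the registers (same write as the probe's error path).
 */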
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) iowrite16(PP_ChipID, lp->virt_addr + ADD_PORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) ioport_unmap(lp->virt_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) release_region(dev_cs89x0->base_addr, NETCARD_IO_EXTENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) free_netdev(dev_cs89x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) #endif /* MODULE && !CONFIG_CS89x0_PLATFORM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) #ifdef CONFIG_CS89x0_PLATFORM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) static int __init cs89x0_platform_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) void __iomem *virt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) dev->irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) if (dev->irq <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) dev_warn(&dev->dev, "interrupt resource missing\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) err = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) virt_addr = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) if (IS_ERR(virt_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) err = PTR_ERR(virt_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) err = cs89x0_probe1(dev, virt_addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) dev_warn(&dev->dev, "no cs8900 or cs8920 detected\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) platform_set_drvdata(pdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) static int cs89x0_platform_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) struct net_device *dev = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) /* The I/O memory was mapped with devm_platform_ioremap_resource() in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)  * cs89x0_platform_probe(), so it is released automatically; only the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)  * netdev has to be unregistered and freed here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) unregister_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) static const struct of_device_id __maybe_unused cs89x0_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) { .compatible = "cirrus,cs8900", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) { .compatible = "cirrus,cs8920", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) MODULE_DEVICE_TABLE(of, cs89x0_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) static struct platform_driver cs89x0_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) .name = DRV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) .of_match_table = of_match_ptr(cs89x0_match),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) .remove = cs89x0_platform_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) #endif /* CONFIG_CS89x0_PLATFORM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) MODULE_DESCRIPTION("Crystal Semiconductor (Now Cirrus Logic) CS89[02]0 network driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) MODULE_AUTHOR("Russell Nelson <nelson@crynwr.com>");