Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Network device driver for the BMAC ethernet controller on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * Apple Powermacs.  Assumes it's under a DBDMA controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * Copyright (C) 1998 Randy Gobbel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  * dynamic procfs inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/etherdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/timer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/proc_fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/crc32.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/crc32poly.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/bitrev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/ethtool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/pgtable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <asm/prom.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <asm/dbdma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <asm/machdep.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <asm/pmac_feature.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include <asm/macio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #include <asm/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #include "bmac.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) 
/*
 * Round an address down (trunc) or up (round) to a PAGE_SIZE boundary.
 */
#define trunc_page(x)	((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
#define round_page(x)	trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))

/* switch to use multicast code lifted from sunhme driver */
#define SUNHME_MULTICAST

#define N_RX_RING	64	/* receive ring entries */
#define N_TX_RING	32	/* transmit ring entries */
#define MAX_TX_ACTIVE	1	/* max transmit commands in flight at once */
#define ETHERCRC	4	/* bytes of ethernet FCS */
#define ETHERMINPACKET	64	/* minimum ethernet frame length */
#define ETHERMTU	1500
#define RX_BUFLEN	(ETHERMTU + 14 + ETHERCRC + 2)	/* MTU + header + FCS + 2 pad */
#define TX_TIMEOUT	HZ	/* 1 second */

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80

/* Debug print hook; expands to nothing by default. */
#define XXDEBUG(args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 
/* Per-device driver state, kept in the net_device private area. */
struct bmac_data {
	/* volatile struct bmac *bmac; */
	struct sk_buff_head *queue;	/* skb queue; managed by code elsewhere in this file */
	volatile struct dbdma_regs __iomem *tx_dma;	/* transmit DBDMA channel registers */
	int tx_dma_intr;		/* transmit DMA irq number */
	volatile struct dbdma_regs __iomem *rx_dma;	/* receive DBDMA channel registers */
	int rx_dma_intr;		/* receive DMA irq number */
	volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
	volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
	struct macio_dev *mdev;		/* underlying macio device */
	int is_bmac_plus;		/* nonzero on BMAC+ (MII PHY instead of serial xcvr) */
	struct sk_buff *rx_bufs[N_RX_RING];	/* skbs attached to rx ring slots */
	int rx_fill;			/* rx ring fill index — presumably producer; see ring code */
	int rx_empty;			/* rx ring empty index — presumably consumer */
	struct sk_buff *tx_bufs[N_TX_RING];	/* skbs attached to tx ring slots */
	int tx_fill;			/* tx ring fill index — presumably producer */
	int tx_empty;			/* tx ring empty index — presumably consumer */
	unsigned char tx_fullup;	/* flag: tx ring full — TODO confirm against xmit path */
	struct timer_list tx_timeout;	/* transmit watchdog timer */
	int timeout_active;		/* set while tx_timeout is pending */
	int sleeping;			/* set while suspended (see bmac_suspend) */
	int opened;			/* device-open flag */
	unsigned short hash_use_count[64];	/* per-bit refcounts for the multicast hash filter */
	unsigned short hash_table_mask[4];	/* shadow of the chip's 64-bit hash filter (BHASH0-3) */
	spinlock_t lock;		/* protects chip/ring state (held in suspend path) */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 
#if 0 /* Move that to ethtool */

/* Name/offset table for dumping chip registers — dead code, kept until
 * an ethtool register-dump hook replaces it. */
typedef struct bmac_reg_entry {
	char *name;
	unsigned short reg_offset;
} bmac_reg_entry_t;

#define N_REG_ENTRIES 31

static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
	{"MEMADD", MEMADD},
	{"MEMDATAHI", MEMDATAHI},
	{"MEMDATALO", MEMDATALO},
	{"TXPNTR", TXPNTR},
	{"RXPNTR", RXPNTR},
	{"IPG1", IPG1},
	{"IPG2", IPG2},
	{"ALIMIT", ALIMIT},
	{"SLOT", SLOT},
	{"PALEN", PALEN},
	{"PAPAT", PAPAT},
	{"TXSFD", TXSFD},
	{"JAM", JAM},
	{"TXCFG", TXCFG},
	{"TXMAX", TXMAX},
	{"TXMIN", TXMIN},
	{"PAREG", PAREG},
	{"DCNT", DCNT},
	{"NCCNT", NCCNT},
	{"NTCNT", NTCNT},
	{"EXCNT", EXCNT},
	{"LTCNT", LTCNT},
	{"TXSM", TXSM},
	{"RXCFG", RXCFG},
	{"RXMAX", RXMAX},
	{"RXMIN", RXMIN},
	{"FRCNT", FRCNT},
	{"AECNT", AECNT},
	{"FECNT", FECNT},
	{"RXSM", RXSM},
	{"RXCV", RXCV}
};

#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 
/* Fallback receive buffer — presumably pointed at by rx DMA when an skb
 * cannot be allocated; allocation and use are elsewhere in this file. */
static unsigned char *bmac_emergency_rxbuf;

/*
 * Number of bytes of private data per BMAC: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES	(sizeof(struct bmac_data) \
	+ (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
	+ sizeof(struct sk_buff_head))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 
/* Forward declarations. */
static int bmac_open(struct net_device *dev);
static int bmac_close(struct net_device *dev);
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
static void bmac_set_multicast(struct net_device *dev);
static void bmac_reset_and_enable(struct net_device *dev);
static void bmac_start_chip(struct net_device *dev);
static void bmac_init_chip(struct net_device *dev);
static void bmac_init_registers(struct net_device *dev);
static void bmac_enable_and_reset_chip(struct net_device *dev);
static int bmac_set_address(struct net_device *dev, void *addr);
static irqreturn_t bmac_misc_intr(int irq, void *dev_id);
static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
static void bmac_set_timeout(struct net_device *dev);
static void bmac_tx_timeout(struct timer_list *t);
static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev);
static void bmac_start(struct net_device *dev);

/*
 * The DBDMA control register takes a 16-bit mask in its upper half and
 * the new bit values in its lower half: SET places the bits in both
 * halves, CLEAR places them in the mask half only.
 */
#define	DBDMA_SET(x)	( ((x) | (x) << 16) )
#define	DBDMA_CLEAR(x)	( (x) << 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 
/*
 * Store a 32-bit value to a DBDMA register.  The PowerPC stwbrx
 * instruction stores in byte-reversed (little-endian) order, matching
 * the little-endian DBDMA register layout on a big-endian CPU.
 */
static inline void
dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
{
	__asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 
/*
 * Load a 32-bit value from a DBDMA register, byte-reversed via lwbrx
 * (counterpart of dbdma_st32 above).
 */
static inline unsigned long
dbdma_ld32(volatile __u32 __iomem *a)
{
	__u32 swap;
	__asm__ volatile ("lwbrx %0,0,%1" :  "=r" (swap) : "r" (a));
	return swap;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 
/* Start or resume a DBDMA channel: set RUN|WAKE, clear PAUSE|DEAD. */
static void
dbdma_continue(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
		   DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
	eieio();	/* order this MMIO write before later accesses */
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 
/*
 * Stop a DBDMA channel: clear every control bit, then busy-wait until
 * the channel's RUN status bit drops.  NOTE(review): no timeout here —
 * relies on the hardware actually stopping.
 */
static void
dbdma_reset(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
		   DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
	eieio();
	while (dbdma_ld32(&dmap->status) & RUN)
		eieio();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 
/*
 * Fill in one DBDMA command descriptor.  All fields are written
 * little-endian; xfer_status and res_count are cleared last
 * (presumably so a stale completion status is never observed).
 */
static void
dbdma_setcmd(volatile struct dbdma_cmd *cp,
	     unsigned short cmd, unsigned count, unsigned long addr,
	     unsigned long cmd_dep)
{
	out_le16(&cp->command, cmd);
	out_le16(&cp->req_count, count);
	out_le32(&cp->phy_addr, addr);
	out_le32(&cp->cmd_dep, cmd_dep);
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->res_count, 0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 
/* Write a 16-bit BMAC chip register at the given offset from base_addr. */
static inline
void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
{
	out_le16((void __iomem *)dev->base_addr + reg_offset, data);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 
/* Read a 16-bit BMAC chip register at the given offset from base_addr. */
static inline
unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
{
	return in_le16((void __iomem *)dev->base_addr + reg_offset);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) bmac_enable_and_reset_chip(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 	struct bmac_data *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 	if (rd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 		dbdma_reset(rd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 	if (td)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 		dbdma_reset(td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 
/* Settle time between MIF (MDIO) line transitions. */
#define MIFDELAY	udelay(10)

/*
 * Clock `nb` bits in from the PHY management interface, MSB first.
 * MIFCSR bit 0 appears to be the management clock and bit 3 the
 * data-in line (TODO confirm against chip docs): each bit is sampled
 * with the clock low, then the clock is raised.  One extra clock cycle
 * is issued after the last bit.
 */
static unsigned int
bmac_mif_readbits(struct net_device *dev, int nb)
{
	unsigned int val = 0;

	while (--nb >= 0) {
		bmwrite(dev, MIFCSR, 0);
		MIFDELAY;
		if (bmread(dev, MIFCSR) & 8)
			val |= 1 << nb;
		bmwrite(dev, MIFCSR, 1);
		MIFDELAY;
	}
	bmwrite(dev, MIFCSR, 0);
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	return val;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 	int b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 	while (--nb >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 		b = (val & (1 << nb))? 6: 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 		bmwrite(dev, MIFCSR, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 		MIFDELAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 		bmwrite(dev, MIFCSR, b|1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 		MIFDELAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 
/*
 * Read a PHY register by bit-banging an IEEE 802.3 clause-22 MDIO read
 * frame: 32-bit preamble of ones, start+opcode 0110 (read), 10 address
 * bits (presumably 5-bit PHY address + 5-bit register number — TODO
 * confirm), then clock in 17 bits (turnaround bit plus 16 data bits).
 */
static unsigned int
bmac_mif_read(struct net_device *dev, unsigned int addr)
{
	unsigned int val;

	bmwrite(dev, MIFCSR, 4);	/* bit 2 appears to enable the data-out driver */
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);	/* preamble: 32 ones */
	bmac_mif_writebits(dev, 6, 4);		/* start (01) + read opcode (10) */
	bmac_mif_writebits(dev, addr, 10);
	bmwrite(dev, MIFCSR, 2);	/* release the data line for turnaround — TODO confirm */
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	val = bmac_mif_readbits(dev, 17);	/* turnaround bit + 16 data bits */
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	return val;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 
/*
 * Write a PHY register by bit-banging an IEEE 802.3 clause-22 MDIO
 * write frame: preamble of ones, start+opcode 0101 (write), 10 address
 * bits, "10" turnaround, 16 data bits, then two idle bits to release
 * the bus.
 */
static void
bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
{
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);	/* preamble: 32 ones */
	bmac_mif_writebits(dev, 5, 4);		/* start (01) + write opcode (01) */
	bmac_mif_writebits(dev, addr, 10);
	bmac_mif_writebits(dev, 2, 2);		/* turnaround "10" */
	bmac_mif_writebits(dev, val, 16);
	bmac_mif_writebits(dev, 3, 2);		/* idle bits, release bus */
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 
/*
 * Program the MAC registers from scratch: reset rx/tx, configure the
 * transceiver, zero the statistics counters and hash filter, enable
 * the FIFOs, load the station address and enable normal interrupts.
 * The chip is left configured but the MACs are not yet enabled
 * (bmac_start_chip does that).
 */
static void
bmac_init_registers(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile unsigned short regValue;
	unsigned short *pWord16;
	int i;

	/* XXDEBUG(("bmac: enter init_registers\n")); */

	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, TXRST, TxResetBit);

	/* Poll up to 100 * 10ms for the transmit reset bit to clear. */
	i = 100;
	do {
		--i;
		udelay(10000);
		regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
	} while ((regValue & TxResetBit) && i > 0);

	/* Plain BMAC uses the serial transceiver interface; BMAC+ has an
	 * MII PHY instead (set up in bmac_init_phy). */
	if (!bp->is_bmac_plus) {
		regValue = bmread(dev, XCVRIF);
		regValue |= ClkBit | SerialMode | COLActiveLow;
		bmwrite(dev, XCVRIF, regValue);
		udelay(10000);
	}

	/* Seed for the backoff random-number generator. */
	bmwrite(dev, RSEED, (unsigned short)0x1968);

	regValue = bmread(dev, XIFC);
	regValue |= TxOutputEnable;
	bmwrite(dev, XIFC, regValue);

	/* Read PAREG — value discarded; presumably a read-to-clear or
	 * settle access (TODO confirm against chip docs). */
	bmread(dev, PAREG);

	/* set collision counters to 0 */
	bmwrite(dev, NCCNT, 0);
	bmwrite(dev, NTCNT, 0);
	bmwrite(dev, EXCNT, 0);
	bmwrite(dev, LTCNT, 0);

	/* set rx counters to 0 */
	bmwrite(dev, FRCNT, 0);
	bmwrite(dev, LECNT, 0);
	bmwrite(dev, AECNT, 0);
	bmwrite(dev, FECNT, 0);
	bmwrite(dev, RXCV, 0);

	/* set tx fifo information */
	bmwrite(dev, TXTH, 4);	/* 4 octets before tx starts */

	bmwrite(dev, TXFIFOCSR, 0);	/* first disable txFIFO */
	bmwrite(dev, TXFIFOCSR, TxFIFOEnable );

	/* set rx fifo information */
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );

	//bmwrite(dev, TXCFG, TxMACEnable);	       	/* TxNeverGiveUp maybe later */
	bmread(dev, STATUS);		/* read it just to clear it */

	/* zero out the chip Hash Filter registers */
	for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]); 	/* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]); 	/* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]); 	/* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]); 	/* bits 63 - 48 */

	/* Load the 6-byte station address as three 16-bit words.
	 * NOTE(review): assumes dev_addr is halfword-aligned and that the
	 * in-memory byte order matches what MADD0-2 expect — TODO confirm. */
	pWord16 = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);

	bmwrite(dev, INTDISABLE, EnableNormal);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 
#if 0
/* Currently unused: mask all chip interrupt sources. */
static void
bmac_disable_interrupts(struct net_device *dev)
{
	bmwrite(dev, INTDISABLE, DisableAll);
}

/* Currently unused: restore the normal interrupt mask. */
static void
bmac_enable_interrupts(struct net_device *dev)
{
	bmwrite(dev, INTDISABLE, EnableNormal);
}
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) bmac_start_chip(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 	struct bmac_data *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	unsigned short	oldConfig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 	/* enable rx dma channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 	dbdma_continue(rd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	oldConfig = bmread(dev, TXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 	/* turn on rx plus any other bits already on (promiscuous possibly) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 	oldConfig = bmread(dev, RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	udelay(20000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 
/*
 * Dump the 32 MII PHY registers at debug level (8 per line), and on
 * BMAC+ set up autonegotiation via the standard MII registers.
 */
static void
bmac_init_phy(struct net_device *dev)
{
	unsigned int addr;
	struct bmac_data *bp = netdev_priv(dev);

	printk(KERN_DEBUG "phy registers:");
	for (addr = 0; addr < 32; ++addr) {
		if ((addr & 7) == 0)
			printk(KERN_DEBUG);	/* start a new debug line every 8 regs */
		printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr));
	}
	printk(KERN_CONT "\n");

	if (bp->is_bmac_plus) {
		unsigned int capable, ctrl;

		/* Build the advertisement word from the PHY's reported
		 * abilities (status reg 1 bits 15:11 shift down to
		 * advertisement reg 4 bits 9:5 per IEEE 802.3 clause 22),
		 * plus the selector bit 0 (CSMA). */
		ctrl = bmac_mif_read(dev, 0);
		capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
		if (bmac_mif_read(dev, 4) != capable ||
		    (ctrl & 0x1000) == 0) {
			/* Advertisement stale or autoneg disabled: update
			 * it and restart autoneg (0x1200 = enable+restart). */
			bmac_mif_write(dev, 4, capable);
			bmac_mif_write(dev, 0, 0x1200);
		} else
			/* Already advertising correctly: just keep
			 * autonegotiation enabled (0x1000). */
			bmac_mif_write(dev, 0, 0x1000);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) static void bmac_init_chip(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	bmac_init_phy(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	bmac_init_registers(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	struct net_device* dev = macio_get_drvdata(mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	struct bmac_data *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	unsigned short config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	netif_device_detach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	/* prolly should wait for dma to finish & turn off the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	spin_lock_irqsave(&bp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	if (bp->timeout_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 		del_timer(&bp->tx_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 		bp->timeout_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	disable_irq(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	disable_irq(bp->tx_dma_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	disable_irq(bp->rx_dma_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	bp->sleeping = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	spin_unlock_irqrestore(&bp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	if (bp->opened) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 		volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 		volatile struct dbdma_regs __iomem *td = bp->tx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 		config = bmread(dev, RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 		bmwrite(dev, RXCFG, (config & ~RxMACEnable));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 		config = bmread(dev, TXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480)        		bmwrite(dev, TXCFG, (config & ~TxMACEnable));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 		bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482)        		/* disable rx and tx dma */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 		rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 		td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485)        		/* free some skb's */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486)        		for (i=0; i<N_RX_RING; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487)        			if (bp->rx_bufs[i] != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488)        				dev_kfree_skb(bp->rx_bufs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489)        				bp->rx_bufs[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490)        			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491)        		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492)        		for (i = 0; i<N_TX_RING; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 			if (bp->tx_bufs[i] != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 		       		dev_kfree_skb(bp->tx_bufs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	       			bp->tx_bufs[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 		       	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499)        	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) static int bmac_resume(struct macio_dev *mdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	struct net_device* dev = macio_get_drvdata(mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	struct bmac_data *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	/* see if this is enough */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	if (bp->opened)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 		bmac_reset_and_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	enable_irq(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513)        	enable_irq(bp->tx_dma_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514)        	enable_irq(bp->rx_dma_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515)        	netif_device_attach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) #endif /* CONFIG_PM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) static int bmac_set_address(struct net_device *dev, void *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	struct bmac_data *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	unsigned char *p = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	unsigned short *pWord16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	XXDEBUG(("bmac: enter set_address\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	spin_lock_irqsave(&bp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	for (i = 0; i < 6; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 		dev->dev_addr[i] = p[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	/* load up the hardware address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	pWord16  = (unsigned short *)dev->dev_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	bmwrite(dev, MADD0, *pWord16++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	bmwrite(dev, MADD1, *pWord16++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	bmwrite(dev, MADD2, *pWord16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	spin_unlock_irqrestore(&bp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	XXDEBUG(("bmac: exit set_address\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) static inline void bmac_set_timeout(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	struct bmac_data *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	spin_lock_irqsave(&bp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	if (bp->timeout_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 		del_timer(&bp->tx_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	add_timer(&bp->tx_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	bp->timeout_active = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	spin_unlock_irqrestore(&bp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	void *vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	unsigned long baddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	unsigned long len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	vaddr = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	baddr = virt_to_bus(vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 		     virt_to_bus(addr), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) bmac_init_tx_ring(struct bmac_data *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	bp->tx_empty = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	bp->tx_fill = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	bp->tx_fullup = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	/* put a branch at the end of the tx command list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	/* reset tx dma */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	dbdma_reset(td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	out_le32(&td->wait_sel, 0x00200020);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) bmac_init_rx_ring(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	struct bmac_data *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	/* initialize list of sk_buffs for receiving and set up recv dma */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	memset((char *)bp->rx_cmds, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	       (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	for (i = 0; i < N_RX_RING; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 		if ((skb = bp->rx_bufs[i]) == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 			bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 			if (skb != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 				skb_reserve(skb, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	bp->rx_empty = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	bp->rx_fill = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	/* Put a branch back to the beginning of the receive command list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	/* start rx dma */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	dbdma_reset(rd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	struct bmac_data *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	/* see if there's a free slot in the tx ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	/* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	/* 	     bp->tx_empty, bp->tx_fill)); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	i = bp->tx_fill + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	if (i >= N_TX_RING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 		i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	if (i == bp->tx_empty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 		netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 		bp->tx_fullup = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 		XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 		return -1;		/* can't take it at the moment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	bp->tx_bufs[bp->tx_fill] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	bp->tx_fill = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	dev->stats.tx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	dbdma_continue(td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) static int rxintcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	struct net_device *dev = (struct net_device *) dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	struct bmac_data *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	volatile struct dbdma_cmd *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	int i, nb, stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	unsigned int residual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	int last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	spin_lock_irqsave(&bp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	if (++rxintcount < 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 		XXDEBUG(("bmac_rxdma_intr\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	last = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	i = bp->rx_empty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 		cp = &bp->rx_cmds[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 		stat = le16_to_cpu(cp->xfer_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 		residual = le16_to_cpu(cp->res_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 		if ((stat & ACTIVE) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 		nb = RX_BUFLEN - residual - 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 		if (nb < (ETHERMINPACKET - ETHERCRC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 			skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 			dev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 			dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 			skb = bp->rx_bufs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 			bp->rx_bufs[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 		if (skb != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 			nb -= ETHERCRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 			skb_put(skb, nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 			skb->protocol = eth_type_trans(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 			netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 			++dev->stats.rx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 			dev->stats.rx_bytes += nb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 			++dev->stats.rx_dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 		if ((skb = bp->rx_bufs[i]) == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 			bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 			if (skb != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 				skb_reserve(bp->rx_bufs[i], 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 		cp->res_count = cpu_to_le16(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 		cp->xfer_status = cpu_to_le16(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 		last = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 		if (++i >= N_RX_RING) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	if (last != -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 		bp->rx_fill = last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		bp->rx_empty = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	dbdma_continue(rd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	spin_unlock_irqrestore(&bp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	if (rxintcount < 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 		XXDEBUG(("bmac_rxdma_intr done\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) static int txintcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	struct net_device *dev = (struct net_device *) dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	struct bmac_data *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	volatile struct dbdma_cmd *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	int stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	spin_lock_irqsave(&bp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	if (txintcount++ < 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		XXDEBUG(("bmac_txdma_intr\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	/*     del_timer(&bp->tx_timeout); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	/*     bp->timeout_active = 0; */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		cp = &bp->tx_cmds[bp->tx_empty];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		stat = le16_to_cpu(cp->xfer_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		if (txintcount < 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 			XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		if (!(stat & ACTIVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 			 * status field might not have been filled by DBDMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 			if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		if (bp->tx_bufs[bp->tx_empty]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 			++dev->stats.tx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 			dev_consume_skb_irq(bp->tx_bufs[bp->tx_empty]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		bp->tx_bufs[bp->tx_empty] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		bp->tx_fullup = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		if (++bp->tx_empty >= N_TX_RING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 			bp->tx_empty = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		if (bp->tx_empty == bp->tx_fill)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	spin_unlock_irqrestore(&bp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	if (txintcount < 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	bmac_start(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) #ifndef SUNHME_MULTICAST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) /* Real fast bit-reversal algorithm, 6-bit values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) static int reverse6[64] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) static unsigned int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) crc416(unsigned int curval, unsigned short nxtval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	unsigned int counter, cur = curval, next = nxtval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	int high_crc_set, low_data_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	/* Swap bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	next = ((next & 0x00FF) << 8) | (next >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	/* Compute bit-by-bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	for (counter = 0; counter < 16; ++counter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		/* is high CRC bit set? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		if ((cur & 0x80000000) == 0) high_crc_set = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		else high_crc_set = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		cur = cur << 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		if ((next & 0x0001) == 0) low_data_set = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		else low_data_set = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		next = next >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		/* do the XOR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		if (high_crc_set ^ low_data_set) cur = cur ^ CRC32_POLY_BE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	return cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) static unsigned int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) bmac_crc(unsigned short *address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	unsigned int newcrc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	newcrc = crc416(0xffffffff, *address);	/* address bits 47 - 32 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	newcrc = crc416(newcrc, address[1]);	/* address bits 31 - 16 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	newcrc = crc416(newcrc, address[2]);	/* address bits 15 - 0  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	return(newcrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857)  * Add requested mcast addr to BMac's hash table filter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) bmac_addhash(struct bmac_data *bp, unsigned char *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	unsigned int	 crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	unsigned short	 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	if (!(*addr)) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	if (bp->hash_use_count[crc]++) return; /* This bit is already set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	mask = crc % 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	mask = (unsigned char)1 << mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	bp->hash_use_count[crc/16] |= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) bmac_removehash(struct bmac_data *bp, unsigned char *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	unsigned int crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	unsigned char mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	/* Now, delete the address from the filter copy, as indicated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	mask = crc % 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	bp->hash_table_mask[crc/16] &= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893)  * Sync the adapter with the software copy of the multicast mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894)  *  (logical address filter).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) bmac_rx_off(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	unsigned short rx_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	rx_cfg = bmread(dev, RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	rx_cfg &= ~RxMACEnable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	bmwrite(dev, RXCFG, rx_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		rx_cfg = bmread(dev, RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	}  while (rx_cfg & RxMACEnable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) unsigned short
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	unsigned short rx_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	rx_cfg = bmread(dev, RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	rx_cfg |= RxMACEnable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	if (hash_enable) rx_cfg |= RxHashFilterEnable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	else rx_cfg &= ~RxHashFilterEnable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	if (promisc_enable) rx_cfg |= RxPromiscEnable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	else rx_cfg &= ~RxPromiscEnable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	bmwrite(dev, RXRST, RxResetValue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	bmwrite(dev, RXCFG, rx_cfg );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	return rx_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) bmac_add_multi(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	       struct bmac_data *bp, unsigned char *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	/* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	bmac_addhash(bp, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	bmac_rx_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	bmac_update_hash_table_mask(dev, bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	/* XXDEBUG(("bmac: exit bmac_add_multi\n")); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) bmac_remove_multi(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		  struct bmac_data *bp, unsigned char *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	bmac_removehash(bp, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	bmac_rx_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	bmac_update_hash_table_mask(dev, bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) /* Set or clear the multicast filter for this adaptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962)     num_addrs == -1	Promiscuous mode, receive all packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963)     num_addrs == 0	Normal mode, clear multicast list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964)     num_addrs > 0	Multicast mode, receive normal and MC packets, and do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 			best-effort filtering.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) static void bmac_set_multicast(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	struct bmac_data *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	int num_addrs = netdev_mc_count(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	unsigned short rx_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	if (bp->sleeping)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		bmac_update_hash_table_mask(dev, bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		rx_cfg = bmac_rx_on(dev, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	} else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		rx_cfg = bmread(dev, RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		rx_cfg |= RxPromiscEnable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		bmwrite(dev, RXCFG, rx_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		rx_cfg = bmac_rx_on(dev, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		if (num_addrs == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 			rx_cfg = bmac_rx_on(dev, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 			XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 			netdev_for_each_mc_addr(ha, dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 				bmac_addhash(bp, ha->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 			bmac_update_hash_table_mask(dev, bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 			rx_cfg = bmac_rx_on(dev, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 			XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	/* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) #else /* ifdef SUNHME_MULTICAST */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) /* The version of set_multicast below was lifted from sunhme.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) static void bmac_set_multicast(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	unsigned short rx_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	u32 crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		bmwrite(dev, BHASH0, 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		bmwrite(dev, BHASH1, 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		bmwrite(dev, BHASH2, 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		bmwrite(dev, BHASH3, 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	} else if(dev->flags & IFF_PROMISC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		rx_cfg = bmread(dev, RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		rx_cfg |= RxPromiscEnable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		bmwrite(dev, RXCFG, rx_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		u16 hash_table[4] = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		rx_cfg = bmread(dev, RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		rx_cfg &= ~RxPromiscEnable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		bmwrite(dev, RXCFG, rx_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		netdev_for_each_mc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 			crc = ether_crc_le(6, ha->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 			crc >>= 26;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 			hash_table[crc >> 4] |= 1 << (crc & 0xf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		bmwrite(dev, BHASH0, hash_table[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		bmwrite(dev, BHASH1, hash_table[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		bmwrite(dev, BHASH2, hash_table[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		bmwrite(dev, BHASH3, hash_table[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) #endif /* SUNHME_MULTICAST */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) static int miscintcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) static irqreturn_t bmac_misc_intr(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	struct net_device *dev = (struct net_device *) dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	unsigned int status = bmread(dev, STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	if (miscintcount++ < 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		XXDEBUG(("bmac_misc_intr\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	/* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	/*     bmac_txdma_intr_inner(irq, dev_id); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	/*   if (status & FrameReceived) dev->stats.rx_dropped++; */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	if (status & RxErrorMask) dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	if (status & RxCRCCntExp) dev->stats.rx_crc_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	if (status & RxLenCntExp) dev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	if (status & RxOverFlow) dev->stats.rx_over_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	if (status & RxAlignCntExp) dev->stats.rx_frame_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	/*   if (status & FrameSent) dev->stats.tx_dropped++; */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	if (status & TxErrorMask) dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	if (status & TxUnderrun) dev->stats.tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	if (status & TxNormalCollExp) dev->stats.collisions++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)  * Procedure for reading EEPROM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) #define SROMAddressLength	5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) #define DataInOn		0x0008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) #define DataInOff		0x0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) #define Clk			0x0002
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) #define ChipSelect		0x0001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) #define SDIShiftCount		3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) #define SD0ShiftCount		2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) #define	DelayValue		1000	/* number of microseconds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) #define SROMStartOffset		10	/* this is in words */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) #define SROMReadCount		3	/* number of words to read from SROM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) #define SROMAddressBits		6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) #define EnetAddressOffset	20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) static unsigned char
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) bmac_clock_out_bit(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	unsigned short         data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	unsigned short         val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	bmwrite(dev, SROMCSR, ChipSelect | Clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	udelay(DelayValue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	data = bmread(dev, SROMCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	udelay(DelayValue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	val = (data >> SD0ShiftCount) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	bmwrite(dev, SROMCSR, ChipSelect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	udelay(DelayValue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) bmac_clock_in_bit(struct net_device *dev, unsigned int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	unsigned short data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	if (val != 0 && val != 1) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	data = (val << SDIShiftCount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	bmwrite(dev, SROMCSR, data | ChipSelect  );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	udelay(DelayValue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	udelay(DelayValue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	bmwrite(dev, SROMCSR, data | ChipSelect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	udelay(DelayValue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) reset_and_select_srom(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	/* first reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	bmwrite(dev, SROMCSR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	udelay(DelayValue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	/* send it the read command (110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	bmac_clock_in_bit(dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	bmac_clock_in_bit(dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	bmac_clock_in_bit(dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) static unsigned short
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	unsigned short data, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	/* send out the address we want to read from */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	for (i = 0; i < addr_len; i++)	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		val = addr >> (addr_len-i-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		bmac_clock_in_bit(dev, val & 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	/* Now read in the 16-bit data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	for (i = 0; i < 16; i++)	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		val = bmac_clock_out_bit(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		data <<= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		data |= val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	bmwrite(dev, SROMCSR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	return data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)  * It looks like Cogent and SMC use different methods for calculating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)  * checksums. What a pain..
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) bmac_verify_checksum(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	unsigned short data, storedCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	reset_and_select_srom(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	data = read_srom(dev, 3, SROMAddressBits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) bmac_get_station_address(struct net_device *dev, unsigned char *ea)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	unsigned short data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	for (i = 0; i < 3; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 			reset_and_select_srom(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 			data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 			ea[2*i]   = bitrev8(data & 0x0ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 			ea[2*i+1] = bitrev8((data >> 8) & 0x0ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) static void bmac_reset_and_enable(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	struct bmac_data *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	unsigned char *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	spin_lock_irqsave(&bp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	bmac_enable_and_reset_chip(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	bmac_init_tx_ring(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	bmac_init_rx_ring(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	bmac_init_chip(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	bmac_start_chip(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	bmwrite(dev, INTDISABLE, EnableNormal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	bp->sleeping = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	 * It seems that the bmac can't receive until it's transmitted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	 * a packet.  So we give it a dummy packet to transmit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	skb = netdev_alloc_skb(dev, ETHERMINPACKET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	if (skb != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		data = skb_put_zero(skb, ETHERMINPACKET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		memcpy(data, dev->dev_addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		bmac_transmit_packet(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	spin_unlock_irqrestore(&bp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) static const struct ethtool_ops bmac_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	.get_link		= ethtool_op_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) static const struct net_device_ops bmac_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	.ndo_open		= bmac_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	.ndo_stop		= bmac_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	.ndo_start_xmit		= bmac_output,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	.ndo_set_rx_mode	= bmac_set_multicast,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	.ndo_set_mac_address	= bmac_set_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	.ndo_validate_addr	= eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	int j, rev, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	struct bmac_data *bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	const unsigned char *prop_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	unsigned char addr[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	int is_bmac_plus = ((int)match->data) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	prop_addr = of_get_property(macio_get_of_node(mdev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 			"mac-address", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	if (prop_addr == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		prop_addr = of_get_property(macio_get_of_node(mdev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 				"local-mac-address", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		if (prop_addr == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 			printk(KERN_ERR "BMAC: Can't get mac-address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 			return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	memcpy(addr, prop_addr, sizeof(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	dev = alloc_etherdev(PRIV_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	macio_set_drvdata(mdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	bp->mdev = mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	spin_lock_init(&bp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	if (macio_request_resources(mdev, "bmac")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		printk(KERN_ERR "BMAC: can't request IO resource !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	dev->base_addr = (unsigned long)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	if (dev->base_addr == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		goto out_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	dev->irq = macio_irq(mdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	bmac_enable_and_reset_chip(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	bmwrite(dev, INTDISABLE, DisableAll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	rev = addr[0] == 0 && addr[1] == 0xA0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	for (j = 0; j < 6; ++j)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 		dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	/* Enable chip without interrupts for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	bmac_enable_and_reset_chip(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	bmwrite(dev, INTDISABLE, DisableAll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	dev->netdev_ops = &bmac_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	dev->ethtool_ops = &bmac_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	bmac_get_station_address(dev, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	if (bmac_verify_checksum(dev) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		goto err_out_iounmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	bp->is_bmac_plus = is_bmac_plus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	if (!bp->tx_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		goto err_out_iounmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	bp->tx_dma_intr = macio_irq(mdev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	if (!bp->rx_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		goto err_out_iounmap_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	bp->rx_dma_intr = macio_irq(mdev, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	skb_queue_head_init(bp->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	timer_setup(&bp->tx_timeout, bmac_tx_timeout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		goto err_out_iounmap_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		goto err_out_irq0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		goto err_out_irq1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	/* Mask chip interrupts and disable chip, will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	 * re-enabled on open()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	disable_irq(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	if (register_netdev(dev) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		printk(KERN_ERR "BMAC: Ethernet registration failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		goto err_out_irq2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	printk(KERN_INFO "%s: BMAC%s at %pM",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	       dev->name, (is_bmac_plus ? "+" : ""), dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	XXDEBUG((", base_addr=%#0lx", dev->base_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	printk("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) err_out_irq2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	free_irq(bp->rx_dma_intr, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) err_out_irq1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	free_irq(bp->tx_dma_intr, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) err_out_irq0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) err_out_iounmap_rx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	iounmap(bp->rx_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) err_out_iounmap_tx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	iounmap(bp->tx_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) err_out_iounmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	iounmap((void __iomem *)dev->base_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) out_release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	macio_release_resources(mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) static int bmac_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	struct bmac_data *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	/* XXDEBUG(("bmac: enter open\n")); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	/* reset the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	bp->opened = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	bmac_reset_and_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	enable_irq(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) static int bmac_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	struct bmac_data *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	unsigned short config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	bp->sleeping = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	/* disable rx and tx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	config = bmread(dev, RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	bmwrite(dev, RXCFG, (config & ~RxMACEnable));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	config = bmread(dev, TXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	bmwrite(dev, TXCFG, (config & ~TxMACEnable));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	/* disable rx and tx dma */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	/* free some skb's */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	XXDEBUG(("bmac: free rx bufs\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	for (i=0; i<N_RX_RING; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 		if (bp->rx_bufs[i] != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 			dev_kfree_skb(bp->rx_bufs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 			bp->rx_bufs[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	XXDEBUG(("bmac: free tx bufs\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	for (i = 0; i<N_TX_RING; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		if (bp->tx_bufs[i] != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 			dev_kfree_skb(bp->tx_bufs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 			bp->tx_bufs[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	XXDEBUG(("bmac: all bufs freed\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	bp->opened = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	disable_irq(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) bmac_start(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	struct bmac_data *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	if (bp->sleeping)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	spin_lock_irqsave(&bp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		i = bp->tx_fill + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		if (i >= N_TX_RING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 			i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		if (i == bp->tx_empty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		skb = skb_dequeue(bp->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		if (skb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		bmac_transmit_packet(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	spin_unlock_irqrestore(&bp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) static netdev_tx_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) bmac_output(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	struct bmac_data *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	skb_queue_tail(bp->queue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	bmac_start(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) static void bmac_tx_timeout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	struct bmac_data *bp = from_timer(bp, t, tx_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	struct net_device *dev = macio_get_drvdata(bp->mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	volatile struct dbdma_cmd *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	unsigned short config, oldConfig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	XXDEBUG(("bmac: tx_timeout called\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	spin_lock_irqsave(&bp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	bp->timeout_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	/* update various counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) /*     	bmac_handle_misc_intrs(bp, 0); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	cp = &bp->tx_cmds[bp->tx_empty];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) /*	XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) /* 	   le32_to_cpu(td->status), le16_to_cpu(cp->xfer_status), bp->tx_bad_runt, */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) /* 	   mb->pr, mb->xmtfs, mb->fifofc)); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	/* turn off both tx and rx and reset the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	config = bmread(dev, RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	bmwrite(dev, RXCFG, (config & ~RxMACEnable));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	config = bmread(dev, TXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	bmwrite(dev, TXCFG, (config & ~TxMACEnable));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	printk(KERN_ERR "bmac: transmit timeout - resetting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	bmac_enable_and_reset_chip(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	/* restart rx dma */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	cp = bus_to_virt(le32_to_cpu(rd->cmdptr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	out_le16(&cp->xfer_status, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	out_le32(&rd->cmdptr, virt_to_bus(cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	out_le32(&rd->control, DBDMA_SET(RUN|WAKE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	/* fix up the transmit side */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 		 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	i = bp->tx_empty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	++dev->stats.tx_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	if (i != bp->tx_fill) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		dev_kfree_skb(bp->tx_bufs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		bp->tx_bufs[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 		if (++i >= N_TX_RING) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		bp->tx_empty = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	bp->tx_fullup = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	if (i != bp->tx_fill) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 		cp = &bp->tx_cmds[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 		out_le16(&cp->xfer_status, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		out_le16(&cp->command, OUTPUT_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		out_le32(&td->cmdptr, virt_to_bus(cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 		out_le32(&td->control, DBDMA_SET(RUN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 		/* 	bmac_set_timeout(dev); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 		XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	/* turn it back on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	oldConfig = bmread(dev, RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	oldConfig = bmread(dev, TXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	spin_unlock_irqrestore(&bp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) static void dump_dbdma(volatile struct dbdma_cmd *cp,int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	int i,*ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	for (i=0;i< count;i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 		ip = (int*)(cp+i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 		       le32_to_cpup(ip+0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 		       le32_to_cpup(ip+1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 		       le32_to_cpup(ip+2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		       le32_to_cpup(ip+3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) bmac_proc_info(char *buffer, char **start, off_t offset, int length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	int len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	off_t pos   = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	off_t begin = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	if (bmac_devs == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 		return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	len += sprintf(buffer, "BMAC counters & registers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	for (i = 0; i<N_REG_ENTRIES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		len += sprintf(buffer + len, "%s: %#08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 			       reg_entries[i].name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 			       bmread(bmac_devs, reg_entries[i].reg_offset));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		pos = begin + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		if (pos < offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 			len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 			begin = pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		if (pos > offset+length) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	*start = buffer + (offset - begin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	len -= (offset - begin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	if (len > length) len = length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) static int bmac_remove(struct macio_dev *mdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	struct net_device *dev = macio_get_drvdata(mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	struct bmac_data *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	unregister_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)        	free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	free_irq(bp->tx_dma_intr, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	free_irq(bp->rx_dma_intr, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	iounmap((void __iomem *)dev->base_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	iounmap(bp->tx_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	iounmap(bp->rx_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	macio_release_resources(mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) static const struct of_device_id bmac_match[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	.name 		= "bmac",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	.data		= (void *)0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	.type		= "network",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	.compatible	= "bmac+",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	.data		= (void *)1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) MODULE_DEVICE_TABLE (of, bmac_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) static struct macio_driver bmac_driver =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		.name 		= "bmac",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		.of_match_table	= bmac_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	.probe		= bmac_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	.remove		= bmac_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	.suspend	= bmac_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	.resume		= bmac_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) static int __init bmac_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	if (bmac_emergency_rxbuf == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 		bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 		if (bmac_emergency_rxbuf == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	return macio_register_driver(&bmac_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) static void __exit bmac_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	macio_unregister_driver(&bmac_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	kfree(bmac_emergency_rxbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	bmac_emergency_rxbuf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) module_init(bmac_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) module_exit(bmac_exit);