Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  *	Driver for the Macintosh 68K onboard MACE controller with PSC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *	driven DMA. The MACE driver code is derived from mace.c. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *	Mac68k theory of operation is courtesy of the MacBSD wizards.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *	Copyright (C) 1996 Paul Mackerras.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  *	Copyright (C) 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  *	Modified heavily by Joshua M. Thompson based on Dave Huang's NetBSD driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  *	Copyright (C) 2007 Finn Thain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  *	Converted to DMA API, converted to unified driver model,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  *	sync'd some routines with mace.c and fixed various bugs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <linux/etherdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <linux/crc32.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #include <linux/bitrev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) #include <linux/gfp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) #include <asm/macints.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) #include <asm/mac_psc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) #include "mace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 
/* Driver name string; presumably used when registering the platform driver */
static char mac_mace_string[] = "macmace";

/* Tx ring: 2^0 = 1 buffer (the driver stops the queue for each frame) */
#define N_TX_BUFF_ORDER	0
#define N_TX_RING	(1 << N_TX_BUFF_ORDER)
/* Rx ring: 2^3 = 8 buffers */
#define N_RX_BUFF_ORDER	3
#define N_RX_RING	(1 << N_RX_BUFF_ORDER)

/* Watchdog timeout for .ndo_tx_timeout: one second worth of jiffies */
#define TX_TIMEOUT	HZ

/* Size in bytes of one DMA ring buffer slot (2 KiB) */
#define MACE_BUFF_SIZE	0x800

/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV	0x0941

/* The MACE is simply wired down on a Mac68K box */

#define MACE_BASE	(void *)(0x50F1C000)	/* MACE register block */
#define MACE_PROM	(void *)(0x50F08001)	/* station-address PROM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 
/*
 * Per-device private state, stored in the netdev_priv() area of the
 * net_device allocated in mace_probe().
 */
struct mace_data {
	volatile struct mace *mace;	/* memory-mapped MACE chip registers */
	unsigned char *tx_ring;		/* CPU address of Tx DMA buffer(s) */
	dma_addr_t tx_ring_phys;	/* bus address of tx_ring */
	unsigned char *rx_ring;		/* CPU address of Rx DMA buffer(s) */
	dma_addr_t rx_ring_phys;	/* bus address of rx_ring */
	int dma_intr;			/* PSC DMA interrupt number */
	int rx_slot, rx_tail;		/* Rx ring bookkeeping */
	int tx_slot, tx_sloti, tx_count; /* Tx register-set toggle / free count */
	int chipid;			/* chip revision (chipid_hi << 8 | chipid_lo) */
	struct device *device;		/* platform device, for the DMA API */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 
/*
 * Layout of a received frame as the PSC DMA engine deposits it in an
 * Rx ring slot: status header (each byte padded out to 16 bits) followed
 * by the frame data.  This must match the hardware layout exactly.
 */
struct mace_frame {
	u8	rcvcnt;		/* receive byte count */
	u8	pad1;
	u8	rcvsts;		/* receive status */
	u8	pad2;
	u8	rntpc;		/* runt packet count */
	u8	pad3;
	u8	rcvcc;		/* receive collision count */
	u8	pad4;
	u32	pad5;
	u32	pad6;
	u8	data[1];	/* first byte of the frame proper */
	/* And frame continues.. */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 
/* Size of the private area handed to alloc_etherdev() */
#define PRIV_BYTES	sizeof(struct mace_data)

/* Forward declarations; definitions appear below */
static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static void mace_reset(struct net_device *dev);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_dma_intr(int irq, void *dev_id);
static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void __mace_set_address(struct net_device *dev, void *addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98)  * Load a receive DMA channel with a base address and ring length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) static void mace_load_rxdma_base(struct net_device *dev, int set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 	struct mace_data *mp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 	psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 	psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 	psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	psc_write_word(PSC_ENETRD_CMD + set, 0x9800);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 	mp->rx_tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)  * Reset the receive DMA subsystem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) static void mace_rxdma_reset(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	struct mace_data *mp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	volatile struct mace *mace = mp->mace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	u8 maccc = mace->maccc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 	mace->maccc = maccc & ~ENRCV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 	psc_write_word(PSC_ENETRD_CTL, 0x8800);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	mace_load_rxdma_base(dev, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	psc_write_word(PSC_ENETRD_CTL, 0x0400);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	psc_write_word(PSC_ENETRD_CTL, 0x8800);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 	mace_load_rxdma_base(dev, 0x10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	psc_write_word(PSC_ENETRD_CTL, 0x0400);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 	mace->maccc = maccc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	mp->rx_slot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x9800);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x9800);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)  * Reset the transmit DMA subsystem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) static void mace_txdma_reset(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	struct mace_data *mp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	volatile struct mace *mace = mp->mace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 	u8 maccc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	psc_write_word(PSC_ENETWR_CTL, 0x8800);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	maccc = mace->maccc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	mace->maccc = maccc & ~ENXMT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	mp->tx_slot = mp->tx_sloti = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	mp->tx_count = N_TX_RING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	psc_write_word(PSC_ENETWR_CTL, 0x0400);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	mace->maccc = maccc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)  * Disable DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) static void mace_dma_off(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 	psc_write_word(PSC_ENETRD_CTL, 0x8800);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 	psc_write_word(PSC_ENETRD_CTL, 0x1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x1100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x1100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 	psc_write_word(PSC_ENETWR_CTL, 0x8800);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	psc_write_word(PSC_ENETWR_CTL, 0x1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	psc_write_word(PSC_ENETWR_CMD + PSC_SET0, 0x1100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 
/* net_device method table; installed on the device in mace_probe() */
static const struct net_device_ops mace_netdev_ops = {
	.ndo_open		= mace_open,
	.ndo_stop		= mace_close,
	.ndo_start_xmit		= mace_xmit_start,
	.ndo_tx_timeout		= mace_tx_timeout,
	.ndo_set_rx_mode	= mace_set_multicast,
	.ndo_set_mac_address	= mace_set_address,
	.ndo_validate_addr	= eth_validate_addr,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)  * Not really much of a probe. The hardware table tells us if this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)  * model of Macintrash has a MACE (AV macintoshes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) static int mace_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 	int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 	struct mace_data *mp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 	unsigned char *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 	unsigned char checksum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 	dev = alloc_etherdev(PRIV_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 	mp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	mp->device = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 	platform_set_drvdata(pdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	SET_NETDEV_DEV(dev, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 	dev->base_addr = (u32)MACE_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 	mp->mace = MACE_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 	dev->irq = IRQ_MAC_MACE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 	mp->dma_intr = IRQ_MAC_MACE_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 	mp->chipid = mp->mace->chipid_hi << 8 | mp->mace->chipid_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 	 * The PROM contains 8 bytes which total 0xFF when XOR'd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 	 * together. Due to the usual peculiar apple brain damage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 	 * the bytes are spaced out in a strange boundary and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 	 * bits are reversed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 	addr = MACE_PROM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 	for (j = 0; j < 6; ++j) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 		u8 v = bitrev8(addr[j<<4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 		checksum ^= v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 		dev->dev_addr[j] = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	for (; j < 8; ++j) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 		checksum ^= bitrev8(addr[j<<4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 	if (checksum != 0xFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 		free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 	dev->netdev_ops		= &mace_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 	dev->watchdog_timeo	= TX_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 	pr_info("Onboard MACE, hardware address %pM, chip revision 0x%04X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 		dev->dev_addr, mp->chipid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 	err = register_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 	free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)  * Reset the chip.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 
/*
 * Reset the chip: soft-reset, mask all interrupts, configure the FIFOs
 * and transmit behaviour, reload the station address, clear the
 * multicast logical-address filter, and select the AUI port.  Rx/Tx
 * remain disabled (maccc = 0) until the caller re-enables them.
 */
static void mace_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;

	/* soft-reset the chip; SWRST reads back set until the reset completes */
	i = 200;
	while (--i) {
		mb->biucc = SWRST;
		if (mb->biucc & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}
	if (!i) {
		printk(KERN_ERR "macmace: cannot reset chip!\n");
		return;
	}

	mb->maccc = 0;	/* turn off tx, rx */
	mb->imr = 0xFF;	/* disable all intrs for now */
	/* read IR — presumably clears pending interrupt status; TODO confirm
	 * against the MACE datasheet */
	i = mb->ir;

	mb->biucc = XMTSP_64;
	mb->utr = RTRD;
	mb->fifocc = XMTFW_8 | RCVFW_64 | XMTFWU | RCVFWU;

	mb->xmtfc = AUTO_PAD_XMIT; /* auto-pad short frames */
	mb->rcvfc = 0;

	/* load up the hardware address */
	__mace_set_address(dev, dev->dev_addr);

	/* clear the multicast filter */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		/* broken rev: skip the ADDRCHG handshake entirely */
		mb->iac = LOGADDR;
	else {
		mb->iac = ADDRCHG | LOGADDR;
		/* busy-wait until the chip latches the address-change request */
		while ((mb->iac & ADDRCHG) != 0)
			;
	}
	/* eight sequential writes fill the 64-bit logical address filter */
	for (i = 0; i < 8; ++i)
		mb->ladrf = 0;

	/* done changing address */
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;

	mb->plscc = PORTSEL_AUI;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315)  * Load the address on a mace controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) static void __mace_set_address(struct net_device *dev, void *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 	struct mace_data *mp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 	volatile struct mace *mb = mp->mace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 	unsigned char *p = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 	/* load up the hardware address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 	if (mp->chipid == BROKEN_ADDRCHG_REV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 		mb->iac = PHYADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 		mb->iac = ADDRCHG | PHYADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 		while ((mb->iac & ADDRCHG) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 			;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 	for (i = 0; i < 6; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 		mb->padr = dev->dev_addr[i] = p[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 	if (mp->chipid != BROKEN_ADDRCHG_REV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 		mb->iac = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) static int mace_set_address(struct net_device *dev, void *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 	struct mace_data *mp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 	volatile struct mace *mb = mp->mace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 	u8 maccc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 	maccc = mb->maccc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 	__mace_set_address(dev, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 	mb->maccc = maccc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)  * Open the Macintosh MACE. Most of this is playing with the DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)  * engine. The ethernet chip is quite friendly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 
/*
 * .ndo_open handler: reset the chip, grab both interrupt lines, allocate
 * the coherent Tx/Rx DMA rings, initialise the PSC DMA channels, and
 * finally enable transmit/receive with only the MACE receive interrupt
 * masked (Rx completion is signalled via the DMA interrupt instead).
 *
 * Returns 0 on success, -EAGAIN if an IRQ cannot be obtained
 * (NOTE(review): the request_irq() error code is not propagated),
 * or -ENOMEM if a DMA ring allocation fails.
 */
static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	/* reset the chip */
	mace_reset(dev);

	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Allocate the DMA ring buffers */

	mp->tx_ring = dma_alloc_coherent(mp->device,
					 N_TX_RING * MACE_BUFF_SIZE,
					 &mp->tx_ring_phys, GFP_KERNEL);
	if (mp->tx_ring == NULL)
		goto out1;

	mp->rx_ring = dma_alloc_coherent(mp->device,
					 N_RX_RING * MACE_BUFF_SIZE,
					 &mp->rx_ring_phys, GFP_KERNEL);
	if (mp->rx_ring == NULL)
		goto out2;

	mace_dma_off(dev);

	/* Not sure what these do */

	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace_rxdma_reset(dev);
	mace_txdma_reset(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;
	return 0;

	/* unwind in reverse order of acquisition */
out2:
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
	                  mp->tx_ring, mp->tx_ring_phys);
out1:
	free_irq(dev->irq, dev);
	free_irq(mp->dma_intr, dev);
	return -ENOMEM;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424)  * Shut down the mace and its interrupt channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) static int mace_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 	struct mace_data *mp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 	volatile struct mace *mb = mp->mace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 	mb->maccc = 0;		/* disable rx and tx	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 	mb->imr = 0xFF;		/* disable all irqs	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 	mace_dma_off(dev);	/* disable rx and tx dma */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440)  * Transmit a frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 
/*
 * .ndo_start_xmit handler.  There is only one Tx buffer (N_TX_RING ==
 * 1), so the queue is stopped for every frame; the Tx-done interrupt
 * handler (not in view) presumably restarts it and replenishes
 * tx_count.  The frame is copied into the coherent Tx buffer and
 * handed to whichever PSC register set tx_slot currently selects.
 */
static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	unsigned long flags;

	/* Stop the queue since there's only the one buffer */

	local_irq_save(flags);
	netif_stop_queue(dev);
	/* tx_count is also decremented/raised from interrupt context, hence
	 * the irq-save window around the check-and-decrement */
	if (!mp->tx_count) {
		printk(KERN_ERR "macmace: tx queue running but no free buffers.\n");
		local_irq_restore(flags);
		return NETDEV_TX_BUSY;
	}
	mp->tx_count--;
	local_irq_restore(flags);

	/* stats are counted at queue time, not on Tx completion */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* We need to copy into our xmit buffer to take care of alignment and caching issues */
	skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);

	/* load the Tx DMA and fire it off */

	psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32)  mp->tx_ring_phys);
	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
	psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);

	/* alternate between the two PSC register sets (offsets 0x00/0x10) */
	mp->tx_slot ^= 0x10;

	/* frame was copied, so the skb can go immediately */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) static void mace_set_multicast(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 	struct mace_data *mp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 	volatile struct mace *mb = mp->mace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 	u32 crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 	u8 maccc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 	maccc = mb->maccc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 	mb->maccc &= ~PROM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 	if (dev->flags & IFF_PROMISC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 		mb->maccc |= PROM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 		unsigned char multicast_filter[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 		struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 		if (dev->flags & IFF_ALLMULTI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 			for (i = 0; i < 8; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 				multicast_filter[i] = 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 			for (i = 0; i < 8; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 				multicast_filter[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 			netdev_for_each_mc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 				crc = ether_crc_le(6, ha->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 				/* bit number in multicast_filter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 				i = crc >> 26;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 				multicast_filter[i >> 3] |= 1 << (i & 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 		if (mp->chipid == BROKEN_ADDRCHG_REV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 			mb->iac = LOGADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 			mb->iac = ADDRCHG | LOGADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 			while ((mb->iac & ADDRCHG) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 				;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 		for (i = 0; i < 8; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 			mb->ladrf = multicast_filter[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 		if (mp->chipid != BROKEN_ADDRCHG_REV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 			mb->iac = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 	mb->maccc = maccc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) static void mace_handle_misc_intrs(struct net_device *dev, int intr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 	struct mace_data *mp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 	volatile struct mace *mb = mp->mace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 	static int mace_babbles, mace_jabbers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 	if (intr & MPCO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 		dev->stats.rx_missed_errors += 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 	dev->stats.rx_missed_errors += mb->mpc;   /* reading clears it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 	if (intr & RNTPCO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 		dev->stats.rx_length_errors += 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 	dev->stats.rx_length_errors += mb->rntpc; /* reading clears it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 	if (intr & CERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 		++dev->stats.tx_heartbeat_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 	if (intr & BABBLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 		if (mace_babbles++ < 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 			printk(KERN_DEBUG "macmace: babbling transmitter\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 	if (intr & JABBER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 		if (mace_jabbers++ < 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 			printk(KERN_DEBUG "macmace: jabbering transceiver\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) static irqreturn_t mace_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 	struct net_device *dev = (struct net_device *) dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 	struct mace_data *mp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 	volatile struct mace *mb = mp->mace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 	int intr, fs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 	/* don't want the dma interrupt handler to fire */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 	intr = mb->ir; /* read interrupt register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 	mace_handle_misc_intrs(dev, intr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 	if (intr & XMTINT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 		fs = mb->xmtfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 		if ((fs & XMTSV) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 			printk(KERN_ERR "macmace: xmtfs not valid! (fs=%x)\n", fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 			mace_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 			 * XXX mace likes to hang the machine after a xmtfs error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 			 * This is hard to reproduce, resetting *may* help
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 		/* dma should have finished */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 		if (!mp->tx_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 			printk(KERN_DEBUG "macmace: tx ring ran out? (fs=%x)\n", fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 		/* Update stats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 		if (fs & (UFLO|LCOL|LCAR|RTRY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 			++dev->stats.tx_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 			if (fs & LCAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 				++dev->stats.tx_carrier_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 			else if (fs & (UFLO|LCOL|RTRY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 				++dev->stats.tx_aborted_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 				if (mb->xmtfs & UFLO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 					dev->stats.tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 					mace_txdma_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 	if (mp->tx_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 		netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 	struct mace_data *mp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 	volatile struct mace *mb = mp->mace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 	/* turn off both tx and rx and reset the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 	mb->maccc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 	printk(KERN_ERR "macmace: transmit timeout - resetting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 	mace_txdma_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 	mace_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 	/* restart rx dma */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 	mace_rxdma_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 	mp->tx_count = N_TX_RING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 	netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 	/* turn it on! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 	mb->maccc = ENXMT | ENRCV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 	/* enable all interrupts except receive interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 	mb->imr = RCVINT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632)  * Handle a newly arrived frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 	unsigned int frame_status = mf->rcvsts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 	if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 		dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 		if (frame_status & RS_OFLO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 			dev->stats.rx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 		if (frame_status & RS_CLSN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 			dev->stats.collisions++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 		if (frame_status & RS_FRAMERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 			dev->stats.rx_frame_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 		if (frame_status & RS_FCSERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 			dev->stats.rx_crc_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 		unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8 );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 		skb = netdev_alloc_skb(dev, frame_length + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 		if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 			dev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 		skb_reserve(skb, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 		skb_put_data(skb, mf->data, frame_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 		skb->protocol = eth_type_trans(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 		netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 		dev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 		dev->stats.rx_bytes += frame_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)  * The PSC has passed us a DMA interrupt event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) static irqreturn_t mace_dma_intr(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 	struct net_device *dev = (struct net_device *) dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 	struct mace_data *mp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 	int left, head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 	u16 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 	u32 baka;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 	/* Not sure what this does */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 	while ((baka = psc_read_long(PSC_MYSTERY)) != psc_read_long(PSC_MYSTERY));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 	if (!(baka & 0x60000000)) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 	 * Process the read queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 	status = psc_read_word(PSC_ENETRD_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 	if (status & 0x2000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 		mace_rxdma_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 	} else if (status & 0x0100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 		psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x1100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 		left = psc_read_long(PSC_ENETRD_LEN + mp->rx_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 		head = N_RX_RING - left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 		/* Loop through the ring buffer and process new packages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 		while (mp->rx_tail < head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 			mace_dma_rx_frame(dev, (struct mace_frame*) (mp->rx_ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 				+ (mp->rx_tail * MACE_BUFF_SIZE)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 			mp->rx_tail++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 		/* If we're out of buffers in this ring then switch to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 		/* the other set, otherwise just reactivate this one.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 		if (!left) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 			mace_load_rxdma_base(dev, mp->rx_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) 			mp->rx_slot ^= 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) 			psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 	 * Process the write queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 	status = psc_read_word(PSC_ENETWR_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 	if (status & 0x2000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 		mace_txdma_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) 	} else if (status & 0x0100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 		psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 		mp->tx_sloti ^= 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) 		mp->tx_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) MODULE_DESCRIPTION("Macintosh MACE ethernet driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) MODULE_ALIAS("platform:macmace");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) static int mac_mace_device_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 	struct net_device *dev = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 	struct mace_data *mp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 	unregister_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 	free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 	free_irq(IRQ_MAC_MACE_DMA, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 	dma_free_coherent(mp->device, N_RX_RING * MACE_BUFF_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 	                  mp->rx_ring, mp->rx_ring_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) 	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 	                  mp->tx_ring, mp->tx_ring_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 	free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) static struct platform_driver mac_mace_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 	.probe  = mace_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 	.remove = mac_mace_device_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 	.driver	= {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 		.name	= mac_mace_string,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) module_platform_driver(mac_mace_driver);