Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*------------------------------------------------------------------------
 . smc91x.h - macros for SMSC's 91C9x/91C1xx single-chip Ethernet device.
 .
 . Copyright (C) 1996 by Erik Stahlman
 . Copyright (C) 2001 Standard Microsystems Corporation
 .	Developed by Simple Network Magic Corporation
 . Copyright (C) 2003 Monta Vista Software, Inc.
 .	Unified SMC91x driver by Nicolas Pitre
 .
 .
 . Information contained in this file was obtained from the LAN91C111
 . manual from SMC.  To get a copy, if you really want one, you can find
 . information under www.smsc.com.
 .
 . Authors
 .	Erik Stahlman		<erik@vt.edu>
 .	Daris A Nevil		<dnevil@snmc.com>
 .	Nicolas Pitre		<nico@fluxnic.net>
 .
 ---------------------------------------------------------------------------*/
#ifndef _SMC91X_H_
#define _SMC91X_H_

#include <linux/dmaengine.h>
#include <linux/smc91x.h>

/*
 * Any 16-bit access is performed with two 8-bit accesses if the hardware
 * can't do it directly. Most registers are 16-bit so those are mandatory.
 */
#define SMC_outw_b(x, a, r)						\
	do {								\
		unsigned int __val16 = (x);				\
		unsigned int __reg = (r);				\
		SMC_outb(__val16, a, __reg);				\
		SMC_outb(__val16 >> 8, a, __reg + (1 << SMC_IO_SHIFT));	\
	} while (0)

#define SMC_inw_b(a, r)							\
	({								\
		unsigned int __val16;					\
		unsigned int __reg = r;					\
		__val16  = SMC_inb(a, __reg);				\
		__val16 |= SMC_inb(a, __reg + (1 << SMC_IO_SHIFT)) << 8; \
		__val16;						\
	})

/*
 * Define your architecture specific bus configuration parameters here.
 */

#if defined(CONFIG_ARM)

#include <asm/mach-types.h>

/* Now the bus width is specified in the platform data
 * pretend here to support all I/O access types
 */
#define SMC_CAN_USE_8BIT	1
#define SMC_CAN_USE_16BIT	1
#define SMC_CAN_USE_32BIT	1
#define SMC_NOWAIT		1

#define SMC_IO_SHIFT		(lp->io_shift)

#define SMC_inb(a, r)		readb((a) + (r))
#define SMC_inw(a, r)							\
	({								\
		unsigned int __smc_r = r;				\
		SMC_16BIT(lp) ? readw((a) + __smc_r) :			\
		SMC_8BIT(lp) ? SMC_inw_b(a, __smc_r) :			\
		({ BUG(); 0; });					\
	})

#define SMC_inl(a, r)		readl((a) + (r))
#define SMC_outb(v, a, r)	writeb(v, (a) + (r))
#define SMC_outw(lp, v, a, r)						\
	do {								\
		unsigned int __v = v, __smc_r = r;			\
		if (SMC_16BIT(lp))					\
			__SMC_outw(lp, __v, a, __smc_r);		\
		else if (SMC_8BIT(lp))					\
			SMC_outw_b(__v, a, __smc_r);			\
		else							\
			BUG();						\
	} while (0)

#define SMC_outl(v, a, r)	writel(v, (a) + (r))
#define SMC_insb(a, r, p, l)	readsb((a) + (r), p, l)
#define SMC_outsb(a, r, p, l)	writesb((a) + (r), p, l)
#define SMC_insw(a, r, p, l)	readsw((a) + (r), p, l)
#define SMC_outsw(a, r, p, l)	writesw((a) + (r), p, l)
#define SMC_insl(a, r, p, l)	readsl((a) + (r), p, l)
#define SMC_outsl(a, r, p, l)	writesl((a) + (r), p, l)
#define SMC_IRQ_FLAGS		(-1)	/* from resource */

/* We actually can't write halfwords properly if not word aligned */
static inline void _SMC_outw_align4(u16 val, void __iomem *ioaddr, int reg,
				    bool use_align4_workaround)
{
	if (use_align4_workaround) {
		unsigned int v = val << 16;
		v |= readl(ioaddr + (reg & ~2)) & 0xffff;
		writel(v, ioaddr + (reg & ~2));
	} else {
		writew(val, ioaddr + reg);
	}
}

#define __SMC_outw(lp, v, a, r)						\
	_SMC_outw_align4((v), (a), (r),					\
			 IS_BUILTIN(CONFIG_ARCH_PXA) && ((r) & 2) &&	\
			 (lp)->cfg.pxa_u16_align4)


#elif	defined(CONFIG_SH_SH4202_MICRODEV)

#define SMC_CAN_USE_8BIT	0
#define SMC_CAN_USE_16BIT	1
#define SMC_CAN_USE_32BIT	0

#define SMC_inb(a, r)		inb((a) + (r) - 0xa0000000)
#define SMC_inw(a, r)		inw((a) + (r) - 0xa0000000)
#define SMC_inl(a, r)		inl((a) + (r) - 0xa0000000)
#define SMC_outb(v, a, r)	outb(v, (a) + (r) - 0xa0000000)
#define SMC_outw(lp, v, a, r)	outw(v, (a) + (r) - 0xa0000000)
#define SMC_outl(v, a, r)	outl(v, (a) + (r) - 0xa0000000)
#define SMC_insl(a, r, p, l)	insl((a) + (r) - 0xa0000000, p, l)
#define SMC_outsl(a, r, p, l)	outsl((a) + (r) - 0xa0000000, p, l)
#define SMC_insw(a, r, p, l)	insw((a) + (r) - 0xa0000000, p, l)
#define SMC_outsw(a, r, p, l)	outsw((a) + (r) - 0xa0000000, p, l)

#define SMC_IRQ_FLAGS		(0)

#elif defined(CONFIG_ATARI)

#define SMC_CAN_USE_8BIT        1
#define SMC_CAN_USE_16BIT       1
#define SMC_CAN_USE_32BIT       1
#define SMC_NOWAIT              1

#define SMC_inb(a, r)           readb((a) + (r))
#define SMC_inw(a, r)           readw((a) + (r))
#define SMC_inl(a, r)           readl((a) + (r))
#define SMC_outb(v, a, r)       writeb(v, (a) + (r))
#define SMC_outw(lp, v, a, r)   writew(v, (a) + (r))
#define SMC_outl(v, a, r)       writel(v, (a) + (r))
#define SMC_insw(a, r, p, l)    readsw((a) + (r), p, l)
#define SMC_outsw(a, r, p, l)   writesw((a) + (r), p, l)
#define SMC_insl(a, r, p, l)    readsl((a) + (r), p, l)
#define SMC_outsl(a, r, p, l)   writesl((a) + (r), p, l)

#define RPC_LSA_DEFAULT         RPC_LED_100_10
#define RPC_LSB_DEFAULT         RPC_LED_TX_RX

#elif defined(CONFIG_COLDFIRE)

#define SMC_CAN_USE_8BIT	0
#define SMC_CAN_USE_16BIT	1
#define SMC_CAN_USE_32BIT	0
#define SMC_NOWAIT		1

static inline void mcf_insw(void *a, unsigned char *p, int l)
{
	u16 *wp = (u16 *) p;
	while (l-- > 0)
		*wp++ = readw(a);
}

static inline void mcf_outsw(void *a, unsigned char *p, int l)
{
	u16 *wp = (u16 *) p;
	while (l-- > 0)
		writew(*wp++, a);
}

#define SMC_inw(a, r)		_swapw(readw((a) + (r)))
#define SMC_outw(lp, v, a, r)	writew(_swapw(v), (a) + (r))
#define SMC_insw(a, r, p, l)	mcf_insw(a + r, p, l)
#define SMC_outsw(a, r, p, l)	mcf_outsw(a + r, p, l)

#define SMC_IRQ_FLAGS		0

#elif defined(CONFIG_H8300)
#define SMC_CAN_USE_8BIT	1
#define SMC_CAN_USE_16BIT	0
#define SMC_CAN_USE_32BIT	0
#define SMC_NOWAIT		0

#define SMC_inb(a, r)		ioread8((a) + (r))
#define SMC_outb(v, a, r)	iowrite8(v, (a) + (r))
#define SMC_insb(a, r, p, l)	ioread8_rep((a) + (r), p, l)
#define SMC_outsb(a, r, p, l)	iowrite8_rep((a) + (r), p, l)

#else

/*
 * Default configuration
 */

#define SMC_CAN_USE_8BIT	1
#define SMC_CAN_USE_16BIT	1
#define SMC_CAN_USE_32BIT	1
#define SMC_NOWAIT		1

#define SMC_IO_SHIFT		(lp->io_shift)

#define SMC_inb(a, r)		ioread8((a) + (r))
#define SMC_inw(a, r)		ioread16((a) + (r))
#define SMC_inl(a, r)		ioread32((a) + (r))
#define SMC_outb(v, a, r)	iowrite8(v, (a) + (r))
#define SMC_outw(lp, v, a, r)	iowrite16(v, (a) + (r))
#define SMC_outl(v, a, r)	iowrite32(v, (a) + (r))
#define SMC_insw(a, r, p, l)	ioread16_rep((a) + (r), p, l)
#define SMC_outsw(a, r, p, l)	iowrite16_rep((a) + (r), p, l)
#define SMC_insl(a, r, p, l)	ioread32_rep((a) + (r), p, l)
#define SMC_outsl(a, r, p, l)	iowrite32_rep((a) + (r), p, l)

#define RPC_LSA_DEFAULT		RPC_LED_100_10
#define RPC_LSB_DEFAULT		RPC_LED_TX_RX

#endif


/* store this information for the driver.. */
struct smc_local {
	/*
	 * If I have to wait until memory is available to send a
	 * packet, I will store the skbuff here, until I get the
	 * desired memory.  Then, I'll send it out and free it.
	 */
	struct sk_buff *pending_tx_skb;
	struct tasklet_struct tx_task;

	struct gpio_desc *power_gpio;
	struct gpio_desc *reset_gpio;

	/* version/revision of the SMC91x chip */
	int	version;

	/* Contains the current active transmission mode */
	int	tcr_cur_mode;

	/* Contains the current active receive mode */
	int	rcr_cur_mode;

	/* Contains the current active receive/phy mode */
	int	rpc_cur_mode;
	int	ctl_rfduplx;
	int	ctl_rspeed;

	u32	msg_enable;
	u32	phy_type;
	struct mii_if_info mii;

	/* work queue */
	struct work_struct phy_configure;
	struct net_device *dev;
	int	work_pending;

	spinlock_t lock;

#ifdef CONFIG_ARCH_PXA
	/* DMA needs the physical address of the chip */
	u_long physaddr;
	struct device *device;
#endif
	struct dma_chan *dma_chan;
	void __iomem *base;
	void __iomem *datacs;

	/* the low address lines on some platforms aren't connected... */
	int	io_shift;
	/* on some platforms a u16 write must be 4-bytes aligned */
	bool	half_word_align4;

	struct smc91x_platdata cfg;
};

#define SMC_8BIT(p)	((p)->cfg.flags & SMC91X_USE_8BIT)
#define SMC_16BIT(p)	((p)->cfg.flags & SMC91X_USE_16BIT)
#define SMC_32BIT(p)	((p)->cfg.flags & SMC91X_USE_32BIT)

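/*
 * Illustrative note (not part of the upstream header): the SMC91X_USE_*
 * bits tested above are normally set in the platform data a board passes
 * to the driver.  A hypothetical board file might do, roughly:
 *
 *	static struct smc91x_platdata board_smc91x_info = {
 *		.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
 *	};
 *
 * (SMC91X_USE_* and SMC91X_NOWAIT come from <linux/smc91x.h>, included at
 * the top of this file.)
 */
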
#ifdef CONFIG_ARCH_PXA
/*
 * Let's use the DMA engine on the XScale PXA2xx for RX packets. This is
 * always happening in irq context so no need to worry about races.  TX is
 * different and probably not worth it for that reason, and not as critical
 * as RX which can overrun memory and lose packets.
 */
#include <linux/dma-mapping.h>

#ifdef SMC_insl
#undef SMC_insl
#define SMC_insl(a, r, p, l) \
	smc_pxa_dma_insl(a, lp, r, dev->dma, p, l)
static inline void
smc_pxa_dma_inpump(struct smc_local *lp, u_char *buf, int len)
{
	dma_addr_t dmabuf;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	enum dma_status status;
	struct dma_tx_state state;

	dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE);
	tx = dmaengine_prep_slave_single(lp->dma_chan, dmabuf, len,
					 DMA_DEV_TO_MEM, 0);
	if (tx) {
		cookie = dmaengine_submit(tx);
		dma_async_issue_pending(lp->dma_chan);
		do {
			status = dmaengine_tx_status(lp->dma_chan, cookie,
						     &state);
			cpu_relax();
		} while (status != DMA_COMPLETE && status != DMA_ERROR &&
			 state.residue);
		dmaengine_terminate_all(lp->dma_chan);
	}
	dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE);
}

static inline void
smc_pxa_dma_insl(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
		 u_char *buf, int len)
{
	struct dma_slave_config	config;
	int ret;

	/* fallback if no DMA available */
	if (!lp->dma_chan) {
		readsl(ioaddr + reg, buf, len);
		return;
	}

	/* 64 bit alignment is required for memory to memory DMA */
	if ((long)buf & 4) {
		*((u32 *)buf) = SMC_inl(ioaddr, reg);
		buf += 4;
		len--;
	}

	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.src_addr = lp->physaddr + reg;
	config.dst_addr = lp->physaddr + reg;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;
	ret = dmaengine_slave_config(lp->dma_chan, &config);
	if (ret) {
		dev_err(lp->device, "dma channel configuration failed: %d\n",
			ret);
		return;
	}

	len *= 4;
	smc_pxa_dma_inpump(lp, buf, len);
}
#endif

#ifdef SMC_insw
#undef SMC_insw
#define SMC_insw(a, r, p, l) \
	smc_pxa_dma_insw(a, lp, r, dev->dma, p, l)
static inline void
smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
		 u_char *buf, int len)
{
	struct dma_slave_config	config;
	int ret;

	/* fallback if no DMA available */
	if (!lp->dma_chan) {
		readsw(ioaddr + reg, buf, len);
		return;
	}

	/* 64 bit alignment is required for memory to memory DMA */
	while ((long)buf & 6) {
		*((u16 *)buf) = SMC_inw(ioaddr, reg);
		buf += 2;
		len--;
	}

	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	config.src_addr = lp->physaddr + reg;
	config.dst_addr = lp->physaddr + reg;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;
	ret = dmaengine_slave_config(lp->dma_chan, &config);
	if (ret) {
		dev_err(lp->device, "dma channel configuration failed: %d\n",
			ret);
		return;
	}

	len *= 2;
	smc_pxa_dma_inpump(lp, buf, len);
}
#endif

#endif  /* CONFIG_ARCH_PXA */


/*
 * Everything a particular hardware setup needs should have been defined
 * at this point.  Add stubs for the undefined cases, mainly to avoid
 * compilation warnings since they'll be optimized away, or to prevent buggy
 * use of them.
 */

#if ! SMC_CAN_USE_32BIT
#define SMC_inl(ioaddr, reg)		({ BUG(); 0; })
#define SMC_outl(x, ioaddr, reg)	BUG()
#define SMC_insl(a, r, p, l)		BUG()
#define SMC_outsl(a, r, p, l)		BUG()
#endif

#if !defined(SMC_insl) || !defined(SMC_outsl)
#define SMC_insl(a, r, p, l)		BUG()
#define SMC_outsl(a, r, p, l)		BUG()
#endif

#if ! SMC_CAN_USE_16BIT

#define SMC_outw(lp, x, ioaddr, reg)	SMC_outw_b(x, ioaddr, reg)
#define SMC_inw(ioaddr, reg)		SMC_inw_b(ioaddr, reg)
#define SMC_insw(a, r, p, l)		BUG()
#define SMC_outsw(a, r, p, l)		BUG()

#endif

#if !defined(SMC_insw) || !defined(SMC_outsw)
#define SMC_insw(a, r, p, l)		BUG()
#define SMC_outsw(a, r, p, l)		BUG()
#endif

#if ! SMC_CAN_USE_8BIT
#undef SMC_inb
#define SMC_inb(ioaddr, reg)		({ BUG(); 0; })
#undef SMC_outb
#define SMC_outb(x, ioaddr, reg)	BUG()
#define SMC_insb(a, r, p, l)		BUG()
#define SMC_outsb(a, r, p, l)		BUG()
#endif

#if !defined(SMC_insb) || !defined(SMC_outsb)
#define SMC_insb(a, r, p, l)		BUG()
#define SMC_outsb(a, r, p, l)		BUG()
#endif

#ifndef SMC_CAN_USE_DATACS
#define SMC_CAN_USE_DATACS	0
#endif

#ifndef SMC_IO_SHIFT
#define SMC_IO_SHIFT	0
#endif

#ifndef	SMC_IRQ_FLAGS
#define	SMC_IRQ_FLAGS		IRQF_TRIGGER_RISING
#endif

#ifndef SMC_INTERRUPT_PREAMBLE
#define SMC_INTERRUPT_PREAMBLE
#endif


/* Because of bank switching, the LAN91x uses only 16 I/O ports */
#define SMC_IO_EXTENT	(16 << SMC_IO_SHIFT)
#define SMC_DATA_EXTENT (4)

/*
 . Bank Select Register:
 .
 .		yyyy yyyy 0000 00xx
 .		xx		= bank number
 .		yyyy yyyy	= 0x33, for identification purposes.
*/
#define BANK_SELECT		(14 << SMC_IO_SHIFT)
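
/*
 * Illustrative sketch only (not part of the upstream header): every other
 * register is reached by first writing the bank number into the Bank
 * Select Register, whose upper byte always reads back as 0x33 and so can
 * double as a cheap presence check.  The driver has its own bank-selection
 * macros elsewhere in this header; the hypothetical helper below merely
 * shows how the accessors defined above fit together.
 */
static inline bool smc_example_select_bank(struct smc_local *lp,
					   void __iomem *ioaddr, int bank)
{
	/* select the requested bank (only the low two bits matter) */
	SMC_outw(lp, bank & 3, ioaddr, BANK_SELECT);
	/* 0x33 in the high byte identifies a LAN91x-style chip */
	return (SMC_inw(ioaddr, BANK_SELECT) & 0xff00) == 0x3300;
}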


// Transmit Control Register
/* BANK 0  */
#define TCR_REG(lp)	SMC_REG(lp, 0x0000, 0)
#define TCR_ENABLE	0x0001	// When 1 we can transmit
#define TCR_LOOP	0x0002	// Controls output pin LBK
#define TCR_FORCOL	0x0004	// When 1 will force a collision
#define TCR_PAD_EN	0x0080	// When 1 will pad tx frames < 64 bytes w/0
#define TCR_NOCRC	0x0100	// When 1 will not append CRC to tx frames
#define TCR_MON_CSN	0x0400	// When 1 tx monitors carrier
#define TCR_FDUPLX	0x0800	// When 1 enables full duplex operation
#define TCR_STP_SQET	0x1000	// When 1 stops tx if Signal Quality Error
#define TCR_EPH_LOOP	0x2000	// When 1 enables EPH block loopback
#define TCR_SWFDUP	0x8000	// When 1 enables Switched Full Duplex mode

#define TCR_CLEAR	0	/* do NOTHING */
/* the default settings for the TCR register : */
#define TCR_DEFAULT	(TCR_ENABLE | TCR_PAD_EN)


// EPH Status Register
/* BANK 0  */
#define EPH_STATUS_REG(lp)	SMC_REG(lp, 0x0002, 0)
#define ES_TX_SUC	0x0001	// Last TX was successful
#define ES_SNGL_COL	0x0002	// Single collision detected for last tx
#define ES_MUL_COL	0x0004	// Multiple collisions detected for last tx
#define ES_LTX_MULT	0x0008	// Last tx was a multicast
#define ES_16COL	0x0010	// 16 Collisions Reached
#define ES_SQET		0x0020	// Signal Quality Error Test
#define ES_LTXBRD	0x0040	// Last tx was a broadcast
#define ES_TXDEFR	0x0080	// Transmit Deferred
#define ES_LATCOL	0x0200	// Late collision detected on last tx
#define ES_LOSTCARR	0x0400	// Lost Carrier Sense
#define ES_EXC_DEF	0x0800	// Excessive Deferral
#define ES_CTR_ROL	0x1000	// Counter Roll Over indication
#define ES_LINK_OK	0x4000	// Driven by inverted value of nLNK pin
#define ES_TXUNRN	0x8000	// Tx Underrun


// Receive Control Register
/* BANK 0  */
#define RCR_REG(lp)		SMC_REG(lp, 0x0004, 0)
#define RCR_RX_ABORT	0x0001	// Set if a rx frame was aborted
#define RCR_PRMS	0x0002	// Enable promiscuous mode
#define RCR_ALMUL	0x0004	// When set accepts all multicast frames
#define RCR_RXEN	0x0100	// IFF this is set, we can receive packets
#define RCR_STRIP_CRC	0x0200	// When set strips CRC from rx packets
#define RCR_ABORT_ENB	0x0200	// When set will abort rx on collision
#define RCR_FILT_CAR	0x0400	// When set filters leading 12 bits of carrier
#define RCR_SOFTRST	0x8000	// resets the chip

/* the normal settings for the RCR register : */
#define RCR_DEFAULT	(RCR_STRIP_CRC | RCR_RXEN)
#define RCR_CLEAR	0x0	// set it to a base state


// Counter Register
/* BANK 0  */
#define COUNTER_REG(lp)	SMC_REG(lp, 0x0006, 0)


// Memory Information Register
/* BANK 0  */
#define MIR_REG(lp)		SMC_REG(lp, 0x0008, 0)


// Receive/Phy Control Register
/* BANK 0  */
#define RPC_REG(lp)		SMC_REG(lp, 0x000A, 0)
#define RPC_SPEED	0x2000	// When 1 PHY is in 100Mbps mode.
#define RPC_DPLX	0x1000	// When 1 PHY is in Full-Duplex Mode
#define RPC_ANEG	0x0800	// When 1 PHY is in Auto-Negotiate Mode
#define RPC_LSXA_SHFT	5	// Bits to shift LS2A,LS1A,LS0A to lsb
#define RPC_LSXB_SHFT	2	// Bits to get LS2B,LS1B,LS0B to lsb

#ifndef RPC_LSA_DEFAULT
#define RPC_LSA_DEFAULT	RPC_LED_100
#endif
#ifndef RPC_LSB_DEFAULT
#define RPC_LSB_DEFAULT RPC_LED_FD
#endif

#define RPC_DEFAULT (RPC_ANEG | RPC_SPEED | RPC_DPLX)
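
/*
 * Illustrative note (not part of the upstream header): the two LED select
 * fields sit inside the RPC register at the LSXA/LSXB offsets, so a full
 * RPC value is normally built by shifting the LED defaults (RPC_LED_*
 * values from <linux/smc91x.h>) up into place and OR-ing in RPC_DEFAULT,
 * roughly:
 *
 *	rpc = RPC_DEFAULT |
 *	      (RPC_LSA_DEFAULT << RPC_LSXA_SHFT) |
 *	      (RPC_LSB_DEFAULT << RPC_LSXB_SHFT);
 *	SMC_outw(lp, rpc, ioaddr, RPC_REG(lp));
 *
 * RPC_REG() relies on SMC_REG(), which is provided elsewhere in this
 * header, so the sketch is left as a comment rather than live code.
 */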


/* Bank 0 0x0C is reserved */

// Bank Select Register
/* All Banks */
#define BSR_REG		0x000E


// Configuration Reg
/* BANK 1 */
#define CONFIG_REG(lp)	SMC_REG(lp, 0x0000,	1)
#define CONFIG_EXT_PHY	0x0200	// 1=external MII, 0=internal Phy
#define CONFIG_GPCNTRL	0x0400	// Inverse value drives pin nCNTRL
#define CONFIG_NO_WAIT	0x1000	// When 1 no extra wait states on ISA bus
#define CONFIG_EPH_POWER_EN 0x8000 // When 0 EPH is placed into low power mode.

// Default is powered-up, Internal Phy, Wait States, and pin nCNTRL=low
#define CONFIG_DEFAULT	(CONFIG_EPH_POWER_EN)


// Base Address Register
/* BANK 1 */
#define BASE_REG(lp)	SMC_REG(lp, 0x0002, 1)


// Individual Address Registers
/* BANK 1 */
#define ADDR0_REG(lp)	SMC_REG(lp, 0x0004, 1)
#define ADDR1_REG(lp)	SMC_REG(lp, 0x0006, 1)
#define ADDR2_REG(lp)	SMC_REG(lp, 0x0008, 1)


// General Purpose Register
/* BANK 1 */
#define GP_REG(lp)		SMC_REG(lp, 0x000A, 1)


// Control Register
/* BANK 1 */
#define CTL_REG(lp)		SMC_REG(lp, 0x000C, 1)
#define CTL_RCV_BAD	0x4000 // When 1 bad CRC packets are received
#define CTL_AUTO_RELEASE 0x0800 // When 1 tx pages are released automatically
#define CTL_LE_ENABLE	0x0080 // When 1 enables Link Error interrupt
#define CTL_CR_ENABLE	0x0040 // When 1 enables Counter Rollover interrupt
#define CTL_TE_ENABLE	0x0020 // When 1 enables Transmit Error interrupt
#define CTL_EEPROM_SELECT 0x0004 // Controls EEPROM reload & store
#define CTL_RELOAD	0x0002 // When set reads EEPROM into registers
#define CTL_STORE	0x0001 // When set stores registers into EEPROM


// MMU Command Register
/* BANK 2 */
#define MMU_CMD_REG(lp)	SMC_REG(lp, 0x0000, 2)
#define MC_BUSY		1	// When 1 the last release has not completed
#define MC_NOP		(0<<5)	// No Op
#define MC_ALLOC	(1<<5)	// OR with number of 256 byte packets
#define MC_RESET	(2<<5)	// Reset MMU to initial state
#define MC_REMOVE	(3<<5)	// Remove the current rx packet
#define MC_RELEASE	(4<<5)	// Remove and release the current rx packet
#define MC_FREEPKT	(5<<5)	// Release packet in PNR register
#define MC_ENQUEUE	(6<<5)	// Enqueue the packet for transmit
#define MC_RSTTXFIFO	(7<<5)	// Reset the TX FIFOs
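
/*
 * Illustrative note (not part of the upstream header): MMU commands are
 * issued by writing one of the MC_* values to MMU_CMD_REG while bank 2 is
 * selected, and MC_BUSY should be polled before queueing another command.
 * A minimal (hypothetical) caller could look roughly like this; the driver
 * proper guards this kind of busy-wait with a timeout:
 *
 *	SMC_outw(lp, MC_RELEASE, ioaddr, MMU_CMD_REG(lp));
 *	while (SMC_inw(ioaddr, MMU_CMD_REG(lp)) & MC_BUSY)
 *		cpu_relax();
 *
 * Like RPC_REG() above, MMU_CMD_REG() depends on SMC_REG() from elsewhere
 * in this header, so the sketch is left as a comment.
 */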


// Packet Number Register
/* BANK 2 */
#define PN_REG(lp)		SMC_REG(lp, 0x0002, 2)


// Allocation Result Register
/* BANK 2 */
#define AR_REG(lp)		SMC_REG(lp, 0x0003, 2)
#define AR_FAILED	0x80	// Allocation Failed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) // TX FIFO Ports Register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) /* BANK 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) #define TXFIFO_REG(lp)	SMC_REG(lp, 0x0004, 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) #define TXFIFO_TEMPTY	0x80	// TX FIFO Empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) // RX FIFO Ports Register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) /* BANK 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) #define RXFIFO_REG(lp)	SMC_REG(lp, 0x0005, 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) #define RXFIFO_REMPTY	0x80	// RX FIFO Empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) #define FIFO_REG(lp)	SMC_REG(lp, 0x0004, 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) // Pointer Register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) /* BANK 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) #define PTR_REG(lp)		SMC_REG(lp, 0x0006, 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) #define PTR_RCV		0x8000 // 1=Receive area, 0=Transmit area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) #define PTR_AUTOINC 	0x4000 // Auto increment the pointer on each access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) #define PTR_READ	0x2000 // When 1 the operation is a read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) // Data Register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) /* BANK 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) #define DATA_REG(lp)	SMC_REG(lp, 0x0008, 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) // Interrupt Status/Acknowledge Register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) /* BANK 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) #define INT_REG(lp)		SMC_REG(lp, 0x000C, 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) // Interrupt Mask Register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) /* BANK 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) #define IM_REG(lp)		SMC_REG(lp, 0x000D, 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) #define IM_MDINT	0x80 // PHY MI Register 18 Interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) #define IM_ERCV_INT	0x40 // Early Receive Interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) #define IM_EPH_INT	0x20 // Set by Ethernet Protocol Handler section
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) #define IM_RX_OVRN_INT	0x10 // Set by Receiver Overruns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) #define IM_ALLOC_INT	0x08 // Set when allocation request is completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) #define IM_TX_EMPTY_INT	0x04 // Set if the TX FIFO goes empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) #define IM_TX_INT	0x02 // Transmit Interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) #define IM_RCV_INT	0x01 // Receive Interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) // Multicast Table Registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) /* BANK 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) #define MCAST_REG1(lp)	SMC_REG(lp, 0x0000, 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) #define MCAST_REG2(lp)	SMC_REG(lp, 0x0002, 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) #define MCAST_REG3(lp)	SMC_REG(lp, 0x0004, 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) #define MCAST_REG4(lp)	SMC_REG(lp, 0x0006, 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) // Management Interface Register (MII)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) /* BANK 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) #define MII_REG(lp)		SMC_REG(lp, 0x0008, 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) #define MII_MSK_CRS100	0x4000 // Disables CRS100 detection during tx half dup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) #define MII_MDOE	0x0008 // MII Output Enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) #define MII_MCLK	0x0004 // MII Clock, pin MDCLK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) #define MII_MDI		0x0002 // MII Input, pin MDI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) #define MII_MDO		0x0001 // MII Output, pin MDO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) // Revision Register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) /* BANK 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) /* ( hi: chip id   low: rev # ) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) #define REV_REG(lp)		SMC_REG(lp, 0x000A, 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) // Early RCV Register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) /* BANK 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) /* this is NOT on SMC9192 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) #define ERCV_REG(lp)	SMC_REG(lp, 0x000C, 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) #define ERCV_RCV_DISCRD	0x0080 // When 1 discards a packet being received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) #define ERCV_THRESHOLD	0x001F // ERCV Threshold Mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) // External Register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) /* BANK 7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) #define EXT_REG(lp)		SMC_REG(lp, 0x0000, 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) #define CHIP_9192	3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) #define CHIP_9194	4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) #define CHIP_9195	5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) #define CHIP_9196	6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) #define CHIP_91100	7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) #define CHIP_91100FD	8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) #define CHIP_91111FD	9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) static const char * chip_ids[ 16 ] =  {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	NULL, NULL, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	/* 3 */ "SMC91C90/91C92",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	/* 4 */ "SMC91C94",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	/* 5 */ "SMC91C95",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	/* 6 */ "SMC91C96",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	/* 7 */ "SMC91C100",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	/* 8 */ "SMC91C100FD",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	/* 9 */ "SMC91C11xFD",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	NULL, NULL, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	NULL, NULL, NULL};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747)  . Receive status bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) #define RS_ALGNERR	0x8000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) #define RS_BRODCAST	0x4000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) #define RS_BADCRC	0x2000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) #define RS_ODDFRAME	0x1000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) #define RS_TOOLONG	0x0800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) #define RS_TOOSHORT	0x0400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) #define RS_MULTICAST	0x0001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) #define RS_ERRORS	(RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT)
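
/*
 * Illustrative sketch, not part of the original driver: a receive path
 * typically tests the status word from the packet header against the
 * RS_* bits above and discards errored frames, e.g.:
 *
 *	if (status & RS_ERRORS) {
 *		dev->stats.rx_errors++;
 *		if (status & RS_TOOLONG)
 *			dev->stats.rx_length_errors++;
 *		if (status & RS_BADCRC)
 *			dev->stats.rx_crc_errors++;
 *	}
 *
 * "status" here is the first word returned by SMC_GET_PKT_HDR().
 */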
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760)  * PHY IDs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761)  *  LAN83C183 == LAN91C111 Internal PHY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) #define PHY_LAN83C183	0x0016f840
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) #define PHY_LAN83C180	0x02821c50
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767)  * PHY Register Addresses (LAN91C111 Internal PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769)  * Generic PHY registers can be found in <linux/mii.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771)  * These phy registers are specific to our on-board phy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) // PHY Configuration Register 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) #define PHY_CFG1_REG		0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) #define PHY_CFG1_LNKDIS		0x8000	// 1=Rx Link Detect Function disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) #define PHY_CFG1_XMTDIS		0x4000	// 1=TP Transmitter Disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) #define PHY_CFG1_XMTPDN		0x2000	// 1=TP Transmitter Powered Down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) #define PHY_CFG1_BYPSCR		0x0400	// 1=Bypass scrambler/descrambler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) #define PHY_CFG1_UNSCDS		0x0200	// 1=Unscramble Idle Reception Disable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) #define PHY_CFG1_EQLZR		0x0100	// 1=Rx Equalizer Disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) #define PHY_CFG1_CABLE		0x0080	// 1=STP(150ohm), 0=UTP(100ohm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) #define PHY_CFG1_RLVL0		0x0040	// 1=Rx Squelch level reduced by 4.5 dB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) #define PHY_CFG1_TLVL_SHIFT	2	// Transmit Output Level Adjust
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) #define PHY_CFG1_TLVL_MASK	0x003C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) #define PHY_CFG1_TRF_MASK	0x0003	// Transmitter Rise/Fall time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) // PHY Configuration Register 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) #define PHY_CFG2_REG		0x11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) #define PHY_CFG2_APOLDIS	0x0020	// 1=Auto Polarity Correction disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) #define PHY_CFG2_JABDIS		0x0010	// 1=Jabber disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) #define PHY_CFG2_MREG		0x0008	// 1=Multiple register access (MII mgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) #define PHY_CFG2_INTMDIO	0x0004	// 1=Interrupt signaled with MDIO pulse
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) // PHY Status Output (and Interrupt status) Register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) #define PHY_INT_REG		0x12	// Status Output (Interrupt Status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) #define PHY_INT_INT		0x8000	// 1=bits have changed since last read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) #define PHY_INT_LNKFAIL		0x4000	// 1=Link Not detected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) #define PHY_INT_LOSSSYNC	0x2000	// 1=Descrambler has lost sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) #define PHY_INT_CWRD		0x1000	// 1=Invalid 4B5B code detected on rx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) #define PHY_INT_SSD		0x0800	// 1=No Start Of Stream detected on rx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) #define PHY_INT_ESD		0x0400	// 1=No End Of Stream detected on rx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) #define PHY_INT_RPOL		0x0200	// 1=Reverse Polarity detected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) #define PHY_INT_JAB		0x0100	// 1=Jabber detected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) #define PHY_INT_SPDDET		0x0080	// 1=100Base-TX mode, 0=10Base-T mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) #define PHY_INT_DPLXDET		0x0040	// 1=Device in Full Duplex
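
/*
 * Illustrative sketch, not part of the original driver: link management
 * code reads this register over MDIO and reacts to the bits above, for
 * example (smc_phy_read() being the driver's MDIO read helper and
 * "phyaddr" the internal PHY address -- both assumed here):
 *
 *	int status = smc_phy_read(dev, phyaddr, PHY_INT_REG);
 *
 *	if (status & PHY_INT_LNKFAIL)
 *		netif_carrier_off(dev);
 *	else if (status & PHY_INT_DPLXDET)
 *		netdev_dbg(dev, "link up, full duplex\n");
 */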
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) // PHY Interrupt/Status Mask Register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) #define PHY_MASK_REG		0x13	// Interrupt Mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) // Uses the same bit definitions as PHY_INT_REG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815)  * SMC91C96 ethernet config and status registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816)  * These are in the "attribute" space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) #define ECOR			0x8000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) #define ECOR_RESET		0x80
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) #define ECOR_LEVEL_IRQ		0x40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) #define ECOR_WR_ATTRIB		0x04
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) #define ECOR_ENABLE		0x01
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) #define ECSR			0x8002
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) #define ECSR_IOIS8		0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) #define ECSR_PWRDWN		0x04
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) #define ECSR_INT		0x02
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) #define ATTRIB_SIZE		((64*1024) << SMC_IO_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833)  * Macros to abstract register access according to the data bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834)  * capabilities.  Please use those and not the in/out primitives.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835)  * Note: the following macros do *not* select the bank -- this must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836)  * be done separately as needed in the main code.  The SMC_REG() macro
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  * only uses the bank argument for debugging purposes (when enabled).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839)  * Note: despite inline functions being safer, everything leading to this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840)  * should preferably be macros so that BUG() reports the line number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841)  * the top-level call site in the core source code rather than a location
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842)  * inside an inline function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843)  */
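
/*
 * Illustrative sketch, not part of the original driver: callers select
 * the bank themselves and keep a local variable literally named "ioaddr"
 * in scope, since the macros below reference it directly.  Assuming
 * lp->base holds the mapped register window, as elsewhere in this driver:
 *
 *	void __iomem *ioaddr = lp->base;
 *	int status;
 *
 *	SMC_SELECT_BANK(lp, 2);
 *	status = SMC_GET_INT(lp);
 */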
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) #if SMC_DEBUG > 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) #define SMC_REG(lp, reg, bank)					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	({								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		int __b = SMC_CURRENT_BANK(lp);			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		if (unlikely((__b & ~0xf0) != (0x3300 | bank))) {	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 			pr_err("%s: bank reg screwed (0x%04x)\n",	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 			       CARDNAME, __b);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 			BUG();						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		}							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		reg<<SMC_IO_SHIFT;					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	})
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) #define SMC_REG(lp, reg, bank)	(reg<<SMC_IO_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861)  * Hack Alert: Some setups just can't write 8 or 16 bits reliably when not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862)  * aligned to a 32 bit boundary.  I tell you that does exist!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863)  * Fortunately the affected register accesses can be easily worked around
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864)  * since we can write zeroes to the preceding 16 bits without adverse
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865)  * effects and use a 32-bit access.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867)  * Enforce it on any 32-bit capable setup for now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) #define SMC_MUST_ALIGN_WRITE(lp)	SMC_32BIT(lp)
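
/*
 * Illustrative note, not part of the original driver: the workaround can
 * be seen in SMC_SET_PN() just below.  When SMC_MUST_ALIGN_WRITE() is
 * true it issues
 *
 *	SMC_outl((x) << 16, ioaddr, SMC_REG(lp, 0, 2));
 *
 * i.e. one aligned 32-bit write at bank 2 offset 0: the preceding 16-bit
 * register receives zeroes (harmless, per the comment above) and the
 * intended value lands in the packet number register at offset 2.
 */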
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) #define SMC_GET_PN(lp)						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	(SMC_8BIT(lp)	? (SMC_inb(ioaddr, PN_REG(lp)))	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 				: (SMC_inw(ioaddr, PN_REG(lp)) & 0xFF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) #define SMC_SET_PN(lp, x)						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	do {								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		if (SMC_MUST_ALIGN_WRITE(lp))				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 			SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 0, 2));	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		else if (SMC_8BIT(lp))				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 			SMC_outb(x, ioaddr, PN_REG(lp));		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		else							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 			SMC_outw(lp, x, ioaddr, PN_REG(lp));		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) #define SMC_GET_AR(lp)						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	(SMC_8BIT(lp)	? (SMC_inb(ioaddr, AR_REG(lp)))	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 				: (SMC_inw(ioaddr, PN_REG(lp)) >> 8))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) #define SMC_GET_TXFIFO(lp)						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	(SMC_8BIT(lp)	? (SMC_inb(ioaddr, TXFIFO_REG(lp)))	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 				: (SMC_inw(ioaddr, TXFIFO_REG(lp)) & 0xFF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) #define SMC_GET_RXFIFO(lp)						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	(SMC_8BIT(lp)	? (SMC_inb(ioaddr, RXFIFO_REG(lp)))	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 				: (SMC_inw(ioaddr, TXFIFO_REG(lp)) >> 8))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) #define SMC_GET_INT(lp)						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	(SMC_8BIT(lp)	? (SMC_inb(ioaddr, INT_REG(lp)))	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 				: (SMC_inw(ioaddr, INT_REG(lp)) & 0xFF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
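/*
 * In 16-bit mode INT_REG shares a word with the interrupt mask, so the
 * acknowledge below re-reads the mask byte and writes it back unchanged,
 * with interrupts disabled so the read-modify-write cannot be torn.
 */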
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) #define SMC_ACK_INT(lp, x)						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	do {								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		if (SMC_8BIT(lp))					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 			SMC_outb(x, ioaddr, INT_REG(lp));		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		else {							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 			unsigned long __flags;				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 			int __mask;					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 			local_irq_save(__flags);			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 			__mask = SMC_inw(ioaddr, INT_REG(lp)) & ~0xff; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 			SMC_outw(lp, __mask | (x), ioaddr, INT_REG(lp)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 			local_irq_restore(__flags);			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		}							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) #define SMC_GET_INT_MASK(lp)						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	(SMC_8BIT(lp)	? (SMC_inb(ioaddr, IM_REG(lp)))	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 				: (SMC_inw(ioaddr, INT_REG(lp)) >> 8))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) #define SMC_SET_INT_MASK(lp, x)					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	do {								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		if (SMC_8BIT(lp))					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 			SMC_outb(x, ioaddr, IM_REG(lp));		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		else							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 			SMC_outw(lp, (x) << 8, ioaddr, INT_REG(lp));	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) #define SMC_CURRENT_BANK(lp)	SMC_inw(ioaddr, BANK_SELECT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) #define SMC_SELECT_BANK(lp, x)					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	do {								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		if (SMC_MUST_ALIGN_WRITE(lp))				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 			SMC_outl((x)<<16, ioaddr, 12<<SMC_IO_SHIFT);	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		else							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 			SMC_outw(lp, x, ioaddr, BANK_SELECT);		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) #define SMC_GET_BASE(lp)		SMC_inw(ioaddr, BASE_REG(lp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) #define SMC_SET_BASE(lp, x)	SMC_outw(lp, x, ioaddr, BASE_REG(lp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) #define SMC_GET_CONFIG(lp)	SMC_inw(ioaddr, CONFIG_REG(lp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) #define SMC_SET_CONFIG(lp, x)	SMC_outw(lp, x, ioaddr, CONFIG_REG(lp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) #define SMC_GET_COUNTER(lp)	SMC_inw(ioaddr, COUNTER_REG(lp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) #define SMC_GET_CTL(lp)		SMC_inw(ioaddr, CTL_REG(lp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) #define SMC_SET_CTL(lp, x)	SMC_outw(lp, x, ioaddr, CTL_REG(lp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) #define SMC_GET_MII(lp)		SMC_inw(ioaddr, MII_REG(lp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) #define SMC_GET_GP(lp)		SMC_inw(ioaddr, GP_REG(lp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) #define SMC_SET_GP(lp, x)						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	do {								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		if (SMC_MUST_ALIGN_WRITE(lp))				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 			SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 8, 1));	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		else							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 			SMC_outw(lp, x, ioaddr, GP_REG(lp));		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) #define SMC_SET_MII(lp, x)	SMC_outw(lp, x, ioaddr, MII_REG(lp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) #define SMC_GET_MIR(lp)		SMC_inw(ioaddr, MIR_REG(lp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) #define SMC_SET_MIR(lp, x)	SMC_outw(lp, x, ioaddr, MIR_REG(lp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) #define SMC_GET_MMU_CMD(lp)	SMC_inw(ioaddr, MMU_CMD_REG(lp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) #define SMC_SET_MMU_CMD(lp, x)	SMC_outw(lp, x, ioaddr, MMU_CMD_REG(lp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) #define SMC_GET_FIFO(lp)	SMC_inw(ioaddr, FIFO_REG(lp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) #define SMC_GET_PTR(lp)		SMC_inw(ioaddr, PTR_REG(lp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) #define SMC_SET_PTR(lp, x)						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	do {								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		if (SMC_MUST_ALIGN_WRITE(lp))				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 			SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 4, 2));	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		else							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 			SMC_outw(lp, x, ioaddr, PTR_REG(lp));		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) #define SMC_GET_EPH_STATUS(lp)	SMC_inw(ioaddr, EPH_STATUS_REG(lp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) #define SMC_GET_RCR(lp)		SMC_inw(ioaddr, RCR_REG(lp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) #define SMC_SET_RCR(lp, x)		SMC_outw(lp, x, ioaddr, RCR_REG(lp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) #define SMC_GET_REV(lp)		SMC_inw(ioaddr, REV_REG(lp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) #define SMC_GET_RPC(lp)		SMC_inw(ioaddr, RPC_REG(lp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) #define SMC_SET_RPC(lp, x)						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	do {								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		if (SMC_MUST_ALIGN_WRITE(lp))				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 			SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 8, 0));	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		else							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 			SMC_outw(lp, x, ioaddr, RPC_REG(lp));		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) #define SMC_GET_TCR(lp)		SMC_inw(ioaddr, TCR_REG(lp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) #define SMC_SET_TCR(lp, x)	SMC_outw(lp, x, ioaddr, TCR_REG(lp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) #ifndef SMC_GET_MAC_ADDR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) #define SMC_GET_MAC_ADDR(lp, addr)					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	do {								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		unsigned int __v;					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		__v = SMC_inw(ioaddr, ADDR0_REG(lp));			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		addr[0] = __v; addr[1] = __v >> 8;			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		__v = SMC_inw(ioaddr, ADDR1_REG(lp));			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		addr[2] = __v; addr[3] = __v >> 8;			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		__v = SMC_inw(ioaddr, ADDR2_REG(lp));			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		addr[4] = __v; addr[5] = __v >> 8;			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) #define SMC_SET_MAC_ADDR(lp, addr)					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	do {								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		SMC_outw(lp, addr[0] | (addr[1] << 8), ioaddr, ADDR0_REG(lp)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		SMC_outw(lp, addr[2] | (addr[3] << 8), ioaddr, ADDR1_REG(lp)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		SMC_outw(lp, addr[4] | (addr[5] << 8), ioaddr, ADDR2_REG(lp)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) #define SMC_SET_MCAST(lp, x)						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	do {								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		const unsigned char *mt = (x);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		SMC_outw(lp, mt[0] | (mt[1] << 8), ioaddr, MCAST_REG1(lp)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		SMC_outw(lp, mt[2] | (mt[3] << 8), ioaddr, MCAST_REG2(lp)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		SMC_outw(lp, mt[4] | (mt[5] << 8), ioaddr, MCAST_REG3(lp)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		SMC_outw(lp, mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4(lp)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) #define SMC_PUT_PKT_HDR(lp, status, length)				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	do {								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		if (SMC_32BIT(lp))					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 			SMC_outl((status) | (length)<<16, ioaddr,	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 				 DATA_REG(lp));			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		else {							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 			SMC_outw(lp, status, ioaddr, DATA_REG(lp));	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 			SMC_outw(lp, length, ioaddr, DATA_REG(lp));	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		}							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) #define SMC_GET_PKT_HDR(lp, status, length)				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	do {								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		if (SMC_32BIT(lp)) {				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 			unsigned int __val = SMC_inl(ioaddr, DATA_REG(lp)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 			(status) = __val & 0xffff;			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 			(length) = __val >> 16;				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		} else {						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 			(status) = SMC_inw(ioaddr, DATA_REG(lp));	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 			(length) = SMC_inw(ioaddr, DATA_REG(lp));	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		}							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) #define SMC_PUSH_DATA(lp, p, l)					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	do {								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		if (SMC_32BIT(lp)) {				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 			void *__ptr = (p);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			int __len = (l);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 			void __iomem *__ioaddr = ioaddr;		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			if (__len >= 2 && (unsigned long)__ptr & 2) {	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 				__len -= 2;				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 				SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 				__ptr += 2;				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 			}						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 			if (SMC_CAN_USE_DATACS && lp->datacs)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 				__ioaddr = lp->datacs;			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 			SMC_outsl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 			if (__len & 2) {				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 				__ptr += (__len & ~3);			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 				SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 			}						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		} else if (SMC_16BIT(lp))				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 			SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1);	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		else if (SMC_8BIT(lp))				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 			SMC_outsb(ioaddr, DATA_REG(lp), p, l);	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	} while (0)
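
/*
 * Illustrative sketch, not part of the original driver: a transmit path
 * built from these macros typically looks like this (packet_no, buf and
 * len are assumed locals; MC_ENQUEUE and the PTR_* bits are defined
 * earlier in this file; odd-byte and control-byte handling is omitted):
 *
 *	SMC_SET_PN(lp, packet_no);
 *	SMC_SET_PTR(lp, PTR_AUTOINC);
 *	SMC_PUT_PKT_HDR(lp, 0, len + 6);
 *	SMC_PUSH_DATA(lp, buf, len & ~1);
 *	SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
 */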
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) #define SMC_PULL_DATA(lp, p, l)					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	do {								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		if (SMC_32BIT(lp)) {				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 			void *__ptr = (p);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 			int __len = (l);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 			void __iomem *__ioaddr = ioaddr;		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 			if ((unsigned long)__ptr & 2) {			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 				/*					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 				 * We want 32bit alignment here.	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 				 * Since some buses perform a full	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 				 * 32bit fetch even for 16bit data	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 				 * we can't use SMC_inw() here.		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 				 * Back up both the source (on-chip)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 				 * and destination pointers by 2 bytes.	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 				 * This is possible since the call to	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 				 * SMC_GET_PKT_HDR() already advanced	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 				 * the source pointer by 4 bytes, and	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 				 * skb_reserve(skb, 2) advanced the	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 				 * destination pointer by 2 bytes.	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 				 */					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 				__ptr -= 2;				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 				__len += 2;				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 				SMC_SET_PTR(lp,			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 					2|PTR_READ|PTR_RCV|PTR_AUTOINC); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 			}						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 			if (SMC_CAN_USE_DATACS && lp->datacs)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 				__ioaddr = lp->datacs;			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 			__len += 2;					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 			SMC_insl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		} else if (SMC_16BIT(lp))				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 			SMC_insw(ioaddr, DATA_REG(lp), p, (l) >> 1);	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		else if (SMC_8BIT(lp))				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 			SMC_insb(ioaddr, DATA_REG(lp), p, l);		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	} while (0)
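
/*
 * Illustrative sketch, not part of the original driver: the matching
 * receive sequence points the chip at the RX FIFO, reads the header and
 * then copies the payload (status, packet_len and data are assumed
 * locals; the exact length bookkeeping for the odd byte, control word
 * and skb_reserve() is omitted):
 *
 *	SMC_SET_PTR(lp, PTR_READ | PTR_RCV | PTR_AUTOINC);
 *	SMC_GET_PKT_HDR(lp, status, packet_len);
 *	SMC_PULL_DATA(lp, data, packet_len - 4);
 */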
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) #endif  /* _SMC91X_H_ */