Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 *	(c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
 *	(c) Copyright 2000, 2001 Red Hat Inc
 *
 *	Development of this driver was funded by Equiinet Ltd
 *			http://www.equiinet.com
 *
 *	ChangeLog:
 *
 *	Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
 *	unification of all the Z85x30 asynchronous drivers for real.
 *
 *	DMA now uses get_free_page as kmalloc buffers may span a 64K
 *	boundary.
 *
 *	Modified for SMP safety and SMP locking by Alan Cox
 *					<alan@lxorguk.ukuu.org.uk>
 *
 *	Performance
 *
 *	Z85230:
 *	Non DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
 *	X.25 is not unrealistic on all machines. DMA mode can in theory
 *	handle T1/E1 quite nicely. In practice the limit seems to be about
 *	512Kbit->1Mbit depending on motherboard.
 *
 *	Z85C30:
 *	64K will take DMA, 9600 baud X.25 should be ok.
 *
 *	Z8530:
 *	Synchronous mode without DMA is unlikely to pass about 2400 baud.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/hdlc.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <asm/dma.h>
#include <asm/io.h>
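/* Realtime locking hooks - currently no-ops (see the z8530_rx notes below). */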
#define RT_LOCK
#define RT_UNLOCK
#include <linux/spinlock.h>

#include "z85230.h"

/**
 *	z8530_read_port - Architecture specific interface function
 *	@p: port to read
 *
 *	Provided port access methods. The Comtrol SV11 requires no delays
 *	between accesses and uses PC I/O. Some drivers may need a 5uS delay.
 *
 *	In the longer term this should become an architecture specific
 *	section so that this can become a generic driver interface for all
 *	platforms. For now we only handle PC I/O ports with or without the
 *	dread 5uS sanity delay.
 *
 *	The caller must hold sufficient locks to avoid violating the horrible
 *	5uS delay rule.
 */

static inline int z8530_read_port(unsigned long p)
{
	u8 r=inb(Z8530_PORT_OF(p));
	if(p&Z8530_PORT_SLEEP)	/* gcc should figure this out efficiently ! */
		udelay(5);
	return r;
}

/**
 *	z8530_write_port - Architecture specific interface function
 *	@p: port to write
 *	@d: value to write
 *
 *	Write a value to a port with delays if need be. Note that the
 *	caller must hold locks to avoid read/writes from other contexts
 *	violating the 5uS rule.
 *
 *	In the longer term this should become an architecture specific
 *	section so that this can become a generic driver interface for all
 *	platforms. For now we only handle PC I/O ports with or without the
 *	dread 5uS sanity delay.
 */

static inline void z8530_write_port(unsigned long p, u8 d)
{
	outb(d,Z8530_PORT_OF(p));
	if(p&Z8530_PORT_SLEEP)
		udelay(5);
}

static void z8530_rx_done(struct z8530_channel *c);
static void z8530_tx_done(struct z8530_channel *c);

/**
 *	read_zsreg - Read a register from a Z85230
 *	@c: Z8530 channel to read from (2 per chip)
 *	@reg: Register to read
 *	FIXME: Use a spinlock.
 *
 *	Most of the Z8530 registers are indexed off the control registers.
 *	A read is done by writing to the control register and reading the
 *	register back. The caller must hold the lock.
 */

static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
{
	if(reg)
		z8530_write_port(c->ctrlio, reg);
	return z8530_read_port(c->ctrlio);
}

/**
 *	read_zsdata - Read the data port of a Z8530 channel
 *	@c: The Z8530 channel to read the data port from
 *
 *	The data port provides fast access to some things. We still
 *	have all the 5uS delays to worry about.
 */

static inline u8 read_zsdata(struct z8530_channel *c)
{
	u8 r;
	r=z8530_read_port(c->dataio);
	return r;
}

/**
 *	write_zsreg - Write to a Z8530 channel register
 *	@c: The Z8530 channel
 *	@reg: Register number
 *	@val: Value to write
 *
 *	Write a value to an indexed register. The caller must hold the lock
 *	to honour the irritating delay rules. We know about register 0
 *	being fast to access.
 *
 *	Assumes c->lock is held.
 */
static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
{
	if(reg)
		z8530_write_port(c->ctrlio, reg);
	z8530_write_port(c->ctrlio, val);
}

/**
 *	write_zsctrl - Write to a Z8530 control register
 *	@c: The Z8530 channel
 *	@val: Value to write
 *
 *	Write directly to the control register on the Z8530
 */

static inline void write_zsctrl(struct z8530_channel *c, u8 val)
{
	z8530_write_port(c->ctrlio, val);
}

/**
 *	write_zsdata - Write to a Z8530 data register
 *	@c: The Z8530 channel
 *	@val: Value to write
 *
 *	Write directly to the data register on the Z8530
 */

static inline void write_zsdata(struct z8530_channel *c, u8 val)
{
	z8530_write_port(c->dataio, val);
}

/*
 *	Register loading parameters for a dead port
 */

u8 z8530_dead_port[]=
{
	255
};

EXPORT_SYMBOL(z8530_dead_port);

/*
 *	Register loading parameters for currently supported circuit types
 */
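
/*
 *	Each table is a list of (register, value) pairs terminated by 255;
 *	z8530_channel_load() (later in this file) walks the list and writes
 *	each value into the named register.
 */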

/*
 *	Data clocked by telco end. This is the correct data for the UK
 *	"kilostream" service, and most other similar services.
 */

u8 z8530_hdlc_kilostream[]=
{
	4,	SYNC_ENAB|SDLC|X1CLK,
	2,	0,	/* No vector */
	1,	0,
	3,	ENT_HM|RxCRC_ENAB|Rx8,
	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
	9,	0,		/* Disable interrupts */
	6,	0xFF,
	7,	FLAG,
	10,	ABUNDER|NRZ|CRCPS,	/* MARKIDLE?? */
	11,	TCTRxCP,
	14,	DISDPLL,
	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
	9,	NV|MIE|NORESET,
	255
};

EXPORT_SYMBOL(z8530_hdlc_kilostream);

/*
 *	As above but for enhanced chips.
 */

u8 z8530_hdlc_kilostream_85230[]=
{
	4,	SYNC_ENAB|SDLC|X1CLK,
	2,	0,	/* No vector */
	1,	0,
	3,	ENT_HM|RxCRC_ENAB|Rx8,
	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
	9,	0,		/* Disable interrupts */
	6,	0xFF,
	7,	FLAG,
	10,	ABUNDER|NRZ|CRCPS,	/* MARKIDLE?? */
	11,	TCTRxCP,
	14,	DISDPLL,
	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
	9,	NV|MIE|NORESET,
	23,	3,		/* Extended mode AUTO TX and EOM */
	255
};

EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);

/**
 *	z8530_flush_fifo - Flush on chip RX FIFO
 *	@c: Channel to flush
 *
 *	Flush the receive FIFO. There is no specific option for this, we
 *	blindly read bytes and discard them. Reading when there is no data
 *	is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
 *
 *	All locking is handled for the caller. On return data may still be
 *	present if it arrived during the flush.
 */

static void z8530_flush_fifo(struct z8530_channel *c)
{
	read_zsreg(c, R1);
	read_zsreg(c, R1);
	read_zsreg(c, R1);
	read_zsreg(c, R1);
	if(c->dev->type==Z85230)
	{
		read_zsreg(c, R1);
		read_zsreg(c, R1);
		read_zsreg(c, R1);
		read_zsreg(c, R1);
	}
}

/**
 *	z8530_rtsdtr - Control the outgoing DTR/RTS line
 *	@c: The Z8530 channel to control
 *	@set: 1 to set, 0 to clear
 *
 *	Sets or clears DTR/RTS on the requested line. All locking is handled
 *	by the caller. For now we assume all boards use the actual RTS/DTR
 *	on the chip. Apparently one or two don't. We'll scream about them
 *	later.
 */

static void z8530_rtsdtr(struct z8530_channel *c, int set)
{
	if (set)
		c->regs[5] |= (RTS | DTR);
	else
		c->regs[5] &= ~(RTS | DTR);
	write_zsreg(c, R5, c->regs[5]);
}

/**
 *	z8530_rx - Handle a PIO receive event
 *	@c: Z8530 channel to process
 *
 *	Receive handler for receiving in PIO mode. This is much like the
 *	async one but not quite the same or as complex.
 *
 *	Note: It's intended that this handler can easily be separated from
 *	the main code to run realtime. That'll be needed for some machines
 *	(eg to ever clock 64kbits on a sparc ;)).
 *
 *	The RT_LOCK macros don't do anything now. Keep the code covered
 *	by them as short as possible in all circumstances - clocks cost
 *	baud. The interrupt handler is assumed to be atomic w.r.t. other
 *	code - this is true in the RT case too.
 *
 *	We only cover the sync cases for this. If you want 2Mbit async
 *	do it yourself but consider medical assistance first. This non DMA
 *	synchronous mode is portable code. The DMA mode assumes PCI like
 *	ISA DMA.
 *
 *	Called with the device lock held.
 */

static void z8530_rx(struct z8530_channel *c)
{
	u8 ch,stat;

	while(1)
	{
		/* FIFO empty ? */
		if(!(read_zsreg(c, R0)&1))
			break;
		ch=read_zsdata(c);
		stat=read_zsreg(c, R1);

		/*
		 *	Overrun ?
		 */
		if(c->count < c->max)
		{
			*c->dptr++=ch;
			c->count++;
		}

		if(stat&END_FR)
		{
			/*
			 *	Error ?
			 */
			if(stat&(Rx_OVR|CRC_ERR))
			{
				/* Rewind the buffer and return */
				if(c->skb)
					c->dptr=c->skb->data;
				c->count=0;
				if(stat&Rx_OVR)
				{
					pr_warn("%s: overrun\n", c->dev->name);
					c->rx_overrun++;
				}
				if(stat&CRC_ERR)
				{
					c->rx_crc_err++;
					/* printk("crc error\n"); */
				}
				/* Shove the frame upstream */
			}
			else
			{
				/*
				 *	Drop the lock for RX processing, or
				 *	there are deadlocks
				 */
				z8530_rx_done(c);
				write_zsctrl(c, RES_Rx_CRC);
			}
		}
	}
	/*
	 *	Clear irq
	 */
	write_zsctrl(c, ERR_RES);
	write_zsctrl(c, RES_H_IUS);
}

/**
 *	z8530_tx - Handle a PIO transmit event
 *	@c: Z8530 channel to process
 *
 *	Z8530 transmit interrupt handler for the PIO mode. The basic
 *	idea is to attempt to keep the FIFO fed. We fill in as many bytes
 *	as possible; it's quite possible that we won't keep up with the
 *	data rate otherwise.
 */

static void z8530_tx(struct z8530_channel *c)
{
	while(c->txcount) {
		/* FIFO full ? */
		if(!(read_zsreg(c, R0)&4))
			return;
		c->txcount--;
		/*
		 *	Shovel out the byte
		 */
		write_zsreg(c, R8, *c->tx_ptr++);
		write_zsctrl(c, RES_H_IUS);
		/* We are about to underflow */
		if(c->txcount==0)
		{
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
		}
	}

	/*
	 *	End of frame TX - fire another one
	 */
	write_zsctrl(c, RES_Tx_P);

	z8530_tx_done(c);
	write_zsctrl(c, RES_H_IUS);
}

/**
 *	z8530_status - Handle a PIO status exception
 *	@chan: Z8530 channel to process
 *
 *	A status event occurred in PIO synchronous mode. There are several
 *	reasons the chip will bother us here. A transmit underrun means we
 *	failed to feed the chip fast enough and just broke a packet. A DCD
 *	change is a line up or down.
 */

static void z8530_status(struct z8530_channel *chan)
{
	u8 status, altered;

	status = read_zsreg(chan, R0);
	altered = chan->status ^ status;

	chan->status = status;

	if (status & TxEOM) {
/*		printk("%s: Tx underrun.\n", chan->dev->name); */
		chan->netdevice->stats.tx_fifo_errors++;
		write_zsctrl(chan, ERR_RES);
		z8530_tx_done(chan);
	}

	if (altered & chan->dcdcheck)
	{
		if (status & chan->dcdcheck) {
			pr_info("%s: DCD raised\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
			if (chan->netdevice)
				netif_carrier_on(chan->netdevice);
		} else {
			pr_info("%s: DCD lost\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
			z8530_flush_fifo(chan);
			if (chan->netdevice)
				netif_carrier_off(chan->netdevice);
		}
	}
	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}

struct z8530_irqhandler z8530_sync = {
	.rx = z8530_rx,
	.tx = z8530_tx,
	.status = z8530_status,
};

EXPORT_SYMBOL(z8530_sync);

/**
 *	z8530_dma_rx - Handle a DMA RX event
 *	@chan: Channel to handle
 *
 *	Non bus mastering DMA interfaces for the Z8x30 devices. This
 *	is really pretty PC specific. The DMA mode means that most receive
 *	events are handled by the DMA hardware. We get a kick here only if
 *	a frame ended.
 */

static void z8530_dma_rx(struct z8530_channel *chan)
{
	if(chan->rxdma_on)
	{
		/* Special condition check only */
		u8 status;

		read_zsreg(chan, R7);
		read_zsreg(chan, R6);

		status=read_zsreg(chan, R1);

		if(status&END_FR)
		{
			z8530_rx_done(chan);	/* Fire up the next one */
		}
		write_zsctrl(chan, ERR_RES);
		write_zsctrl(chan, RES_H_IUS);
	}
	else
	{
		/* DMA is off right now, drain the slow way */
		z8530_rx(chan);
	}
}

/**
 *	z8530_dma_tx - Handle a DMA TX event
 *	@chan:	The Z8530 channel to handle
 *
 *	We have received an interrupt while doing DMA transmissions. It
 *	shouldn't happen. Scream loudly if it does.
 */

static void z8530_dma_tx(struct z8530_channel *chan)
{
	if(!chan->dma_tx)
	{
		pr_warn("Hey who turned the DMA off?\n");
		z8530_tx(chan);
		return;
	}
	/* This shouldn't occur in DMA mode */
	pr_err("DMA tx - bogus event!\n");
	z8530_tx(chan);
}

/**
 *	z8530_dma_status - Handle a DMA status exception
 *	@chan: Z8530 channel to process
 *
 *	A status event occurred on the Z8530. We receive these for two reasons
 *	when in DMA mode. Firstly if we finished a packet transfer we get one
 *	and kick the next packet out. Secondly we may see a DCD change.
 */

static void z8530_dma_status(struct z8530_channel *chan)
{
	u8 status, altered;

	status=read_zsreg(chan, R0);
	altered=chan->status^status;

	chan->status=status;

	if(chan->dma_tx)
	{
		if(status&TxEOM)
		{
			unsigned long flags;

			flags=claim_dma_lock();
			disable_dma(chan->txdma);
			clear_dma_ff(chan->txdma);
			chan->txdma_on=0;
			release_dma_lock(flags);
			z8530_tx_done(chan);
		}
	}

	if (altered & chan->dcdcheck)
	{
		if (status & chan->dcdcheck) {
			pr_info("%s: DCD raised\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
			if (chan->netdevice)
				netif_carrier_on(chan->netdevice);
		} else {
			pr_info("%s: DCD lost\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
			z8530_flush_fifo(chan);
			if (chan->netdevice)
				netif_carrier_off(chan->netdevice);
		}
	}

	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}

static struct z8530_irqhandler z8530_dma_sync = {
	.rx = z8530_dma_rx,
	.tx = z8530_dma_tx,
	.status = z8530_dma_status,
};
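/* As z8530_dma_sync, but receive stays in PIO mode; only transmit uses DMA. */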
static struct z8530_irqhandler z8530_txdma_sync = {
	.rx = z8530_rx,
	.tx = z8530_dma_tx,
	.status = z8530_dma_status,
};

/**
 *	z8530_rx_clear - Handle RX events from a stopped chip
 *	@c: Z8530 channel to shut up
 *
 *	Receive interrupt vectors for a Z8530 that is in 'parked' mode.
 *	For machines with PCI Z85x30 cards, or level triggered interrupts
 *	(eg the MacII) we must clear the interrupt cause or die.
 */

static void z8530_rx_clear(struct z8530_channel *c)
{
	/*
	 *	Data and status bytes
	 */
	u8 stat;

	read_zsdata(c);
	stat=read_zsreg(c, R1);

	if(stat&END_FR)
		write_zsctrl(c, RES_Rx_CRC);
	/*
	 *	Clear irq
	 */
	write_zsctrl(c, ERR_RES);
	write_zsctrl(c, RES_H_IUS);
}

/**
 *	z8530_tx_clear - Handle TX events from a stopped chip
 *	@c: Z8530 channel to shut up
 *
 *	Transmit interrupt vectors for a Z8530 that is in 'parked' mode.
 *	For machines with PCI Z85x30 cards, or level triggered interrupts
 *	(eg the MacII) we must clear the interrupt cause or die.
 */

static void z8530_tx_clear(struct z8530_channel *c)
{
	write_zsctrl(c, RES_Tx_P);
	write_zsctrl(c, RES_H_IUS);
}

/**
 *	z8530_status_clear - Handle status events from a stopped chip
 *	@chan: Z8530 channel to shut up
 *
 *	Status interrupt vectors for a Z8530 that is in 'parked' mode.
 *	For machines with PCI Z85x30 cards, or level triggered interrupts
 *	(eg the MacII) we must clear the interrupt cause or die.
 */

static void z8530_status_clear(struct z8530_channel *chan)
{
	u8 status=read_zsreg(chan, R0);
	if(status&TxEOM)
		write_zsctrl(chan, ERR_RES);
	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}

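/* Installed on a channel once it has been shut down (see z8530_sync_close). */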
struct z8530_irqhandler z8530_nop = {
	.rx = z8530_rx_clear,
	.tx = z8530_tx_clear,
	.status = z8530_status_clear,
};

EXPORT_SYMBOL(z8530_nop);
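
/*
 * Card drivers attach the handler below at probe time; a minimal sketch
 * (the irq number and z8530dev pointer are illustrative names):
 *
 *	if (request_irq(irq, z8530_interrupt, 0, "z85230", z8530dev))
 *		goto fail;
 */
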
/**
 *	z8530_interrupt - Handle an interrupt from a Z8530
 *	@irq: Interrupt number
 *	@dev_id: The Z8530 device that is interrupting.
 *
 *	A Z85[2]30 device has stuck its hand in the air for attention.
 *	We scan both the channels on the chip for events and then call
 *	the channel specific callbacks for each channel that has events.
 *	We have to use callback functions because the two channels can be
 *	in different modes.
 *
 *	Locking is done for the handlers. Note that locking is done
 *	at the chip level (the 5uS delay issue is per chip not per
 *	channel). c->lock for both channels points to dev->lock.
 */

irqreturn_t z8530_interrupt(int irq, void *dev_id)
{
	struct z8530_dev *dev=dev_id;
	u8 intr;
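	/* Crude re-entrancy guard, shared by all z8530 devices */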
	static volatile int locker=0;
	int work=0;
	struct z8530_irqhandler *irqs;

	if(locker)
	{
		pr_err("IRQ re-enter\n");
		return IRQ_NONE;
	}
	locker=1;

	spin_lock(&dev->lock);

	while(++work<5000)
	{
		intr = read_zsreg(&dev->chanA, R3);
		if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
			break;

		/* This holds the IRQ status. On the 8530 you must read it
		   from chan A even though it applies to the whole chip */

		/* Now walk the chip and see what it is wanting - it may be
		   an IRQ for someone else remember */

		irqs=dev->chanA.irqs;

		if(intr & (CHARxIP|CHATxIP|CHAEXT))
		{
			if(intr&CHARxIP)
				irqs->rx(&dev->chanA);
			if(intr&CHATxIP)
				irqs->tx(&dev->chanA);
			if(intr&CHAEXT)
				irqs->status(&dev->chanA);
		}

		irqs=dev->chanB.irqs;

		if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
		{
			if(intr&CHBRxIP)
				irqs->rx(&dev->chanB);
			if(intr&CHBTxIP)
				irqs->tx(&dev->chanB);
			if(intr&CHBEXT)
				irqs->status(&dev->chanB);
		}
	}
	spin_unlock(&dev->lock);
	if(work==5000)
		pr_err("%s: interrupt jammed - abort(0x%X)!\n",
		       dev->name, intr);
	/* Ok all done */
	locker=0;
	return IRQ_HANDLED;
}

EXPORT_SYMBOL(z8530_interrupt);

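/*
 * Default register image for a channel. Index 12 is WR12, the lower
 * byte of the baud rate generator time constant; everything else
 * starts cleared.
 */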
static const u8 reg_init[16]=
{
	0,0,0,0,
	0,0,0,0,
	0,0,0,0,
	0x55,0,0,0
};

/**
 *	z8530_sync_open - Open a Z8530 channel for PIO
 *	@dev:	The network interface we are using
 *	@c:	The Z8530 channel to open in synchronous PIO mode
 *
 *	Switch a Z8530 into synchronous mode without DMA assist. We
 *	raise the RTS/DTR and commence network operation.
 */

int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);

	c->sync = 1;
	c->mtu = dev->mtu+64;
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;
	c->irqs = &z8530_sync;

	/* This loads the double buffer up */
	z8530_rx_done(c);	/* Load the frame ring */
	z8530_rx_done(c);	/* Load the backup frame */
	z8530_rtsdtr(c,1);
	c->dma_tx = 0;
	c->regs[R1]|=TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_sync_open);
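
/*
 * A card driver's open path typically loads a register table and then
 * opens the channel; a rough sketch (names and ordering are illustrative):
 *
 *	z8530_channel_load(&card->chanA, z8530_hdlc_kilostream);
 *	z8530_sync_open(netdev, &card->chanA);
 */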
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815)  *	z8530_sync_close - Close a PIO Z8530 channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816)  *	@dev: Network device to close
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817)  *	@c: Z8530 channel to disassociate and move to idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819)  *	Close down a Z8530 interface and switch its interrupt handlers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820)  *	to discard future events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822)  
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	u8 chk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	spin_lock_irqsave(c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	c->irqs = &z8530_nop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	c->max = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	c->sync = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	chk=read_zsreg(c,R0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	write_zsreg(c, R3, c->regs[R3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	z8530_rtsdtr(c,0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	spin_unlock_irqrestore(c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) EXPORT_SYMBOL(z8530_sync_close);
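/*
 *	Example: a minimal open path for a card driver using the PIO
 *	sync mode above. This is an illustrative sketch, not code from
 *	this driver; it assumes the z8530_dev pointer was stored in the
 *	HDLC private data at attach time, which is an assumption of the
 *	example rather than anything z85230 requires.
 */
static int __maybe_unused example_sync_open(struct net_device *dev)
{
	struct z8530_dev *sc = dev_to_hdlc(dev)->priv;	/* assumed layout */
	int err;

	err = z8530_sync_open(dev, &sc->chanA);
	if (err)
		return err;
	err = hdlc_open(dev);
	if (err) {
		z8530_sync_close(dev, &sc->chanA);
		return err;
	}
	netif_start_queue(dev);
	return 0;
}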
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844)  *	z8530_sync_dma_open - Open a Z8530 for DMA I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845)  *	@dev: The network device to attach
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846)  *	@c: The Z8530 channel to configure in sync DMA mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848)  *	Set up a Z85x30 device for synchronous DMA in both directions. Two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849)  *	ISA DMA channels must be available for this to work. We assume ISA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850)  *	DMA driven I/O and PC limits on access.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852)  
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	unsigned long cflags, dflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	c->sync = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	c->mtu = dev->mtu+64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	c->count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	c->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	c->skb2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	 *	Load the DMA interfaces up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	c->rxdma_on = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	c->txdma_on = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	 *	Allocate the DMA flip buffers. Limit by page size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	 *	Everyone runs an MTU of 1500 or less on WAN links, so this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	 *	should be fine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	if(c->mtu > PAGE_SIZE/2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	if(c->rx_buf[0]==NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	if(c->tx_dma_buf[0]==NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		free_page((unsigned long)c->rx_buf[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		c->rx_buf[0]=NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	c->tx_dma_used=0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	c->dma_tx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	c->dma_num=0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	c->dma_ready=1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	 *	Enable DMA control mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	spin_lock_irqsave(c->lock, cflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	 *	TX DMA via DIR/REQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	c->regs[R14]|= DTRREQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	write_zsreg(c, R14, c->regs[R14]);     
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	c->regs[R1]&= ~TxINT_ENAB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	write_zsreg(c, R1, c->regs[R1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	 *	RX DMA via W/Req
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	 */	 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	c->regs[R1]|= WT_FN_RDYFN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	c->regs[R1]|= WT_RDY_RT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	c->regs[R1]|= INT_ERR_Rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	c->regs[R1]&= ~TxINT_ENAB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	write_zsreg(c, R1, c->regs[R1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	c->regs[R1]|= WT_RDY_ENAB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	write_zsreg(c, R1, c->regs[R1]);            
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	 *	DMA interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	 *	Set up the DMA configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	 */	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	dflags=claim_dma_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	disable_dma(c->rxdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	clear_dma_ff(c->rxdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	set_dma_count(c->rxdma, c->mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	enable_dma(c->rxdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	disable_dma(c->txdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	clear_dma_ff(c->txdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	set_dma_mode(c->txdma, DMA_MODE_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	disable_dma(c->txdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	release_dma_lock(dflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	 *	Select the DMA interrupt handlers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	c->rxdma_on = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	c->txdma_on = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	c->tx_dma_used = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	c->irqs = &z8530_dma_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	z8530_rtsdtr(c,1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	spin_unlock_irqrestore(c->lock, cflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) EXPORT_SYMBOL(z8530_sync_dma_open);
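/*
 *	Example: the caller owns the ISA DMA channels. A sketch of the
 *	claim that has to happen before z8530_sync_dma_open() is called;
 *	the channel numbers and label strings here are hypothetical.
 */
static int __maybe_unused example_claim_dma(struct net_device *dev,
					    struct z8530_channel *c)
{
	c->rxdma = 3;			/* hypothetical card wiring */
	c->txdma = 1;
	if (request_dma(c->rxdma, "z8530 RX"))
		return -EBUSY;
	if (request_dma(c->txdma, "z8530 TX")) {
		free_dma(c->rxdma);
		return -EBUSY;
	}
	return z8530_sync_dma_open(dev, c);
}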
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968)  *	z8530_sync_dma_close - Close down DMA I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969)  *	@dev: Network device to detach
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970)  *	@c: Z8530 channel to move into discard mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972)  *	Shut down a DMA mode synchronous interface. Halt the DMA, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973)  *	free the buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975)  
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	u8 chk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	c->irqs = &z8530_nop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	c->max = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	c->sync = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	 *	Disable the PC DMA channels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	flags=claim_dma_lock(); 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	disable_dma(c->rxdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	clear_dma_ff(c->rxdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	c->rxdma_on = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	disable_dma(c->txdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	clear_dma_ff(c->txdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	release_dma_lock(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	c->txdma_on = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	c->tx_dma_used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	spin_lock_irqsave(c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	 *	Disable DMA control mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	c->regs[R1]&= ~WT_RDY_ENAB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	write_zsreg(c, R1, c->regs[R1]);            
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	c->regs[R1]|= INT_ALL_Rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	write_zsreg(c, R1, c->regs[R1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	c->regs[R14]&= ~DTRREQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	write_zsreg(c, R14, c->regs[R14]);   
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	if(c->rx_buf[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		free_page((unsigned long)c->rx_buf[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		c->rx_buf[0]=NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	if(c->tx_dma_buf[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		free_page((unsigned  long)c->tx_dma_buf[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		c->tx_dma_buf[0]=NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	chk=read_zsreg(c,R0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	write_zsreg(c, R3, c->regs[R3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	z8530_rtsdtr(c,0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	spin_unlock_irqrestore(c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) EXPORT_SYMBOL(z8530_sync_dma_close);
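/*
 *	Note for callers (illustrative): z8530_sync_dma_close() halts
 *	the transfers and frees the flip buffers, but the ISA DMA
 *	channels themselves stay claimed; a hypothetical close path
 *	releases them itself:
 *
 *		z8530_sync_dma_close(dev, c);
 *		free_dma(c->txdma);
 *		free_dma(c->rxdma);
 */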
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)  *	z8530_sync_txdma_open - Open a Z8530 for TX driven DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)  *	@dev: The network device to attach
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)  *	@c: The Z8530 channel to configure in sync DMA mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)  *	Set up a Z85x30 device for synchronous DMA transmission. One
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)  *	ISA DMA channel must be available for this to work. The receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)  *	side is run in PIO mode, but then it has the bigger FIFO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	unsigned long cflags, dflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	pr_info("Opening sync interface for TX-DMA\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	c->sync = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	c->mtu = dev->mtu+64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	c->count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	c->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	c->skb2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	 *	Allocate the DMA flip buffers. Limit by page size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	 *	Everyone runs an MTU of 1500 or less on WAN links, so this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	 *	should be fine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	if(c->mtu > PAGE_SIZE/2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	if(c->tx_dma_buf[0]==NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	spin_lock_irqsave(c->lock, cflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	 *	Load the PIO receive ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	z8530_rx_done(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	z8530_rx_done(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	 *	Load the DMA interfaces up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	c->rxdma_on = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	c->txdma_on = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	c->tx_dma_used=0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	c->dma_num=0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	c->dma_ready=1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	c->dma_tx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	 *	Enable DMA control mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	 *	TX DMA via DIR/REQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	c->regs[R14]|= DTRREQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	write_zsreg(c, R14, c->regs[R14]);     
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	c->regs[R1]&= ~TxINT_ENAB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	write_zsreg(c, R1, c->regs[R1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	 *	Set up the DMA configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	 */	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	dflags = claim_dma_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	disable_dma(c->txdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	clear_dma_ff(c->txdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	set_dma_mode(c->txdma, DMA_MODE_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	disable_dma(c->txdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	release_dma_lock(dflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	 *	Select the DMA interrupt handlers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	c->rxdma_on = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	c->txdma_on = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	c->tx_dma_used = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	c->irqs = &z8530_txdma_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	z8530_rtsdtr(c,1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	spin_unlock_irqrestore(c->lock, cflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) EXPORT_SYMBOL(z8530_sync_txdma_open);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)  *	z8530_sync_txdma_close - Close down a TX driven DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)  *	@dev: Network device to detach
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)  *	@c: Z8530 channel to move into discard mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)  *	Shut down a DMA/PIO split mode synchronous interface. Halt the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)  *	DMA and free the buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	unsigned long dflags, cflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	u8 chk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	spin_lock_irqsave(c->lock, cflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	c->irqs = &z8530_nop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	c->max = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	c->sync = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	 *	Disable the PC DMA channels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	dflags = claim_dma_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	disable_dma(c->txdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	clear_dma_ff(c->txdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	c->txdma_on = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	c->tx_dma_used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	release_dma_lock(dflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	 *	Disable DMA control mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	c->regs[R1]&= ~WT_RDY_ENAB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	write_zsreg(c, R1, c->regs[R1]);            
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	c->regs[R1]|= INT_ALL_Rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	write_zsreg(c, R1, c->regs[R1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	c->regs[R14]&= ~DTRREQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	write_zsreg(c, R14, c->regs[R14]);   
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	if(c->tx_dma_buf[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		free_page((unsigned long)c->tx_dma_buf[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		c->tx_dma_buf[0]=NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	chk=read_zsreg(c,R0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	write_zsreg(c, R3, c->regs[R3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	z8530_rtsdtr(c,0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	spin_unlock_irqrestore(c->lock, cflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) EXPORT_SYMBOL(z8530_sync_txdma_close);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)  *	Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)  *	it exists...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)  
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) static const char *z8530_type_name[]={
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	"Z8530",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	"Z85C30",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	"Z85230"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)  *	z8530_describe - Uniformly describe a Z8530 port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)  *	@dev: Z8530 device to describe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)  *	@mapping: string holding mapping type (e.g. "I/O" or "Mem")
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)  *	@io: the port value in question
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)  *	Describe a Z8530 in a standard format. We must pass the I/O address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)  *	ourselves as the port offset isn't predictable. The main reason for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)  *	this function is to try to get a common report format.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	pr_info("%s: %s found at %s 0x%lX, IRQ %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		dev->name, 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		z8530_type_name[dev->type],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		Z8530_PORT_OF(io),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) EXPORT_SYMBOL(z8530_describe);
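/*
 *	Example: a probe routine for an I/O mapped card would report
 *	itself with a call such as (base address hypothetical):
 *
 *		z8530_describe(sc, "I/O", 0x210);
 *
 *	which logs a line of the form
 *	"hostess0: Z85230 found at I/O 0x210, IRQ 5".
 */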
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)  *	Locked operation part of the z8530 init code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)  
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) static inline int do_z8530_init(struct z8530_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	/* NOP the interrupt handlers first - we might get a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	   floating IRQ transition when we reset the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	dev->chanA.irqs=&z8530_nop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	dev->chanB.irqs=&z8530_nop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	dev->chanA.dcdcheck=DCD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	dev->chanB.dcdcheck=DCD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	/* Reset the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	write_zsreg(&dev->chanA, R9, 0xC0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	udelay(200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	/* Now check it's valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	write_zsreg(&dev->chanA, R12, 0xAA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	if(read_zsreg(&dev->chanA, R12)!=0xAA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	write_zsreg(&dev->chanA, R12, 0x55);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	if(read_zsreg(&dev->chanA, R12)!=0x55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	dev->type=Z8530;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	 *	See the application note.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	write_zsreg(&dev->chanA, R15, 0x01);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	 *	If we can set the low bit of R15 then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	 *	the chip is enhanced.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	if(read_zsreg(&dev->chanA, R15)==0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		/* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		/* Put a char in the fifo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		write_zsreg(&dev->chanA, R8, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 			dev->type = Z85230;	/* Has a FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 			dev->type = Z85C30;	/* Z85C30, 1 byte FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 		
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	 *	The code assumes R7' and friends are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	 *	off. Use write_zsext() for these and keep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	 *	this bit clear.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	write_zsreg(&dev->chanA, R15, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	 *	At this point it looks like the chip is behaving
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	memcpy(dev->chanA.regs, reg_init, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	memcpy(dev->chanB.regs, reg_init, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)  *	z8530_init - Initialise a Z8530 device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)  *	@dev: Z8530 device to initialise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)  *	Configure a Z8530/Z85C30 or Z85230 chip. We check the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)  *	is present, identify the type and then program it to hopefully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)  *	keep quiet and behave. This matters a lot: a Z8530 in the wrong
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)  *	state will sometimes get into stupid modes generating 10kHz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)  *	interrupt streams and the like.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)  *	We set the interrupt handler up to discard any events, in case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)  *	we get them during reset or setup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)  *	Return 0 for success, or a negative value indicating the problem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)  *	in errno form.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) int z8530_init(struct z8530_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	/* Set up the chip level lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	spin_lock_init(&dev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	dev->chanA.lock = &dev->lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	dev->chanB.lock = &dev->lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	spin_lock_irqsave(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	ret = do_z8530_init(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	spin_unlock_irqrestore(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) EXPORT_SYMBOL(z8530_init);
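/*
 *	Example: the caller fills in the register access addresses and
 *	the IRQ before z8530_init() probes the chip. A sketch only; the
 *	I/O decode below (channel A on the odd addresses) is one common
 *	card wiring and is an assumption of this example.
 */
static int __maybe_unused example_chip_setup(struct z8530_dev *sc,
					     unsigned long iobase, int irq)
{
	sc->irq = irq;
	sc->chanA.ctrlio = iobase + 1;	/* hypothetical decode */
	sc->chanA.dataio = iobase + 3;
	sc->chanB.ctrlio = iobase;
	sc->chanB.dataio = iobase + 2;
	sc->chanA.dev = sc;
	sc->chanB.dev = sc;
	return z8530_init(sc);		/* identifies Z8530/Z85C30/Z85230 */
}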
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)  *	z8530_shutdown - Shutdown a Z8530 device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)  *	@dev: The Z8530 chip to shutdown
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)  *	We set the interrupt handlers to silence any interrupts. We then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)  *	reset the chip and wait 100us to be sure the reset completed, just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)  *	in case the caller then tries to do stuff.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)  *	This is called without the lock held
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)  
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) int z8530_shutdown(struct z8530_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	/* Reset the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	spin_lock_irqsave(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	dev->chanA.irqs=&z8530_nop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	dev->chanB.irqs=&z8530_nop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	write_zsreg(&dev->chanA, R9, 0xC0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	/* We must hold the lock across the udelay; the chip is off-limits here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	spin_unlock_irqrestore(&dev->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) EXPORT_SYMBOL(z8530_shutdown);
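/*
 *	Example teardown order for a hypothetical remove path: close the
 *	channels first, reset the chip, then release what probe claimed
 *	(names and the region size are assumptions of this sketch):
 *
 *		z8530_sync_close(dev, &sc->chanA);
 *		z8530_shutdown(sc);
 *		free_irq(sc->irq, sc);
 *		release_region(iobase, 8);
 */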
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)  *	z8530_channel_load - Load channel data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)  *	@c: Z8530 channel to configure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)  *	@rtable: table of register, value pairs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)  *	FIXME: ioctl to allow user uploaded tables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)  *	Load a Z8530 channel up from the system data. We use +16 to 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)  *	indicate the "prime" registers. The value 255 terminates the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)  *	table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	spin_lock_irqsave(c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	while(*rtable!=255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		int reg=*rtable++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 		if(reg>0x0F)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 			write_zsreg(c, R15, c->regs[15]|1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 		write_zsreg(c, reg&0x0F, *rtable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		if(reg>0x0F)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 			write_zsreg(c, R15, c->regs[15]&~1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		c->regs[reg]=*rtable++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	c->rx_function=z8530_null_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	c->skb=NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	c->tx_skb=NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	c->tx_next_skb=NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	c->mtu=1500;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	c->max=0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	c->count=0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	c->status=read_zsreg(c, R0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	c->sync=1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	spin_unlock_irqrestore(c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) EXPORT_SYMBOL(z8530_channel_load);
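/*
 *	Example table (illustrative values only, not a validated line
 *	configuration): flat pairs of register number and value,
 *	terminated by 255. Adding 16 to a register number selects the
 *	"prime" bank; the loader toggles the low bit of R15 around the
 *	write.
 */
static u8 example_rtable[] __maybe_unused = {
	R4,	SYNC_ENAB | SDLC | X1CLK,	/* SDLC mode, x1 clock */
	R1,	0,			/* interrupts off while loading */
	R10,	NRZ,			/* NRZ encoding */
	R6 + 16, 0,			/* hypothetical prime-bank write */
	255				/* terminator */
};
/* Loaded with: z8530_channel_load(&sc->chanA, example_rtable); */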
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)  *	z8530_tx_begin - Begin packet transmission
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)  *	@c: The Z8530 channel to kick
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)  *	This is the speed sensitive side of transmission. If we are called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)  *	and no buffer is being transmitted we commence the next buffer. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)  *	nothing is queued we idle the sync. 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)  *	Note: this code path runs in interrupt context; keep it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)  *	fast or bad things will happen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)  *	Called with the lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) static void z8530_tx_begin(struct z8530_channel *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	if(c->tx_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	c->tx_skb=c->tx_next_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	c->tx_next_skb=NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	c->tx_ptr=c->tx_next_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	if(c->tx_skb==NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		/* Idle on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		if(c->dma_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 			flags=claim_dma_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 			disable_dma(c->txdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 			 *	Check if we crapped out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 			if (get_dma_residue(c->txdma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 			{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 				c->netdevice->stats.tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 				c->netdevice->stats.tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 			release_dma_lock(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		c->txcount=0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		c->txcount=c->tx_skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		if(c->dma_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 			 *	FIXME. DMA is broken for the original 8530,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 			 *	on the older parts we need to set a flag and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 			 *	wait for a further TX interrupt to fire this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 			 *	stage off	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 			 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 			flags=claim_dma_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 			disable_dma(c->txdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 			 *	These two are needed by the 8530/85C30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 			 *	and must be issued when idling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 			 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 			if(c->dev->type!=Z85230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 			{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 				write_zsctrl(c, RES_Tx_CRC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 				write_zsctrl(c, RES_EOM_L);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 			}	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 			clear_dma_ff(c->txdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 			set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 			set_dma_count(c->txdma, c->txcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 			enable_dma(c->txdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 			release_dma_lock(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 			write_zsctrl(c, RES_EOM_L);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 			write_zsreg(c, R5, c->regs[R5]|TxENAB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 		{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 			/* ABUNDER off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 			write_zsreg(c, R10, c->regs[10]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 			write_zsctrl(c, RES_Tx_CRC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 			while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 			{		
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 				write_zsreg(c, R8, *c->tx_ptr++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 				c->txcount--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	 *	Since we emptied tx_skb we can ask for more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	netif_wake_queue(c->netdevice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)  *	z8530_tx_done - TX complete callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)  *	@c: The channel that completed a transmit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)  *	This is called when we complete a packet send. We wake the queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)  *	start the next packet going and then free the buffer of the existing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)  *	packet. This code is fairly timing sensitive.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)  *	Called with the register lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) static void z8530_tx_done(struct z8530_channel *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	/* Actually this can happen. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	if (c->tx_skb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	skb = c->tx_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	c->tx_skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	z8530_tx_begin(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	c->netdevice->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	c->netdevice->stats.tx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	dev_consume_skb_irq(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) }
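/*
 *	Example: a driver's transmit entry point simply hands the frame
 *	to z8530_queue_xmit() (see below), which queues it as
 *	tx_next_skb and lets z8530_tx_begin()/z8530_tx_done() drive it.
 *	The sc variable is hypothetical driver state:
 *
 *		static netdev_tx_t example_xmit(struct sk_buff *skb,
 *						struct net_device *dev)
 *		{
 *			return z8530_queue_xmit(&sc->chanA, skb);
 *		}
 */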
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)  *	z8530_null_rx - Discard a packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)  *	@c: The channel the packet arrived on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)  *	@skb: The buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)  *	We point the receive handler at this function when idle. Instead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)  *	of processing the frames we get to throw them away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)  
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) EXPORT_SYMBOL(z8530_null_rx);
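/*
 *	Example: a driver-side receive hook installed in place of the
 *	discard handler above. An illustrative sketch: it assumes the
 *	SCC left its two CRC bytes on the tail of the frame, as the
 *	synchronous HDLC drivers built on this core expect.
 */
static void __maybe_unused example_input(struct z8530_channel *c,
					 struct sk_buff *skb)
{
	skb_trim(skb, skb->len - 2);	/* drop the trailing CRC */
	skb->protocol = hdlc_type_trans(skb, c->netdevice);
	skb_reset_mac_header(skb);
	skb->dev = c->netdevice;
	netif_rx(skb);
}
/* Installed after open with: c->rx_function = example_input; */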
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)  *	z8530_rx_done - Receive completion callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  *	@c: The channel that completed a receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)  *	A new packet is complete. Our goal here is to get back into receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)  *	mode as fast as possible. On the Z85230 we could change to using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)  *	ESCC mode, but on the older chips we have no choice. We flip to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)  *	new buffer immediately in DMA mode so that the DMA of the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)  *	frame can occur while we are copying the previous buffer to an sk_buff.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)  *	Called with the lock held
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)  
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) static void z8530_rx_done(struct z8530_channel *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	int ct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	 *	Is our receive engine in DMA mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	if(c->rxdma_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 		 *	Save the ready state and the buffer currently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 		 *	being used as the DMA target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 		int ready=c->dma_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		unsigned char *rxb=c->rx_buf[c->dma_num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 		unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		 *	Complete this DMA. Necessary to find the length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 		 */		
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		flags=claim_dma_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		disable_dma(c->rxdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 		clear_dma_ff(c->rxdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		c->rxdma_on=0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 		ct=c->mtu-get_dma_residue(c->rxdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		if(ct<0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 			ct=2;	/* Shit happens.. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 		c->dma_ready=0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		 *	Normal case: the other slot is free, start the next DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 		 *	into it immediately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 		 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		if(ready)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 			c->dma_num^=1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 			set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 			set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 			set_dma_count(c->rxdma, c->mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 			c->rxdma_on = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 			enable_dma(c->rxdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 			/* Stop any frames that we missed the head of 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 			   from passing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 			write_zsreg(c, R0, RES_Rx_CRC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 			/* Can't occur as we don't re-enable the DMA irq until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 			   after the flip is done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 			netdev_warn(c->netdevice, "DMA flip overrun!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		release_dma_lock(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 		 *	Shove the old buffer into an sk_buff. We can't DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 		 *	directly into one on a PC - it might be above the 16MB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		 *	boundary. Optimisation - we could check to see if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 		 *	can avoid the copy. Optimisation 2 - make the memcpy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		 *	a copychecksum.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		skb = dev_alloc_skb(ct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		if (skb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 			c->netdevice->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 			netdev_warn(c->netdevice, "Memory squeeze\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 			skb_put(skb, ct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 			skb_copy_to_linear_data(skb, rxb, ct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 			c->netdevice->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 			c->netdevice->stats.rx_bytes += ct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		c->dma_ready = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		RT_LOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		skb = c->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 		/*
		/*
		 *	The game we play for the non-DMA case is similar.
		 *	We want to get the controller set up for the next
		 *	packet as fast as possible: we potentially have only
		 *	one byte plus the FIFO length in which to do it. So
		 *	we flip to the new buffer first and do the copying
		 *	and allocating afterwards. For the current case it
		 *	doesn't matter, but if you build a system where the
		 *	sync IRQ isn't blocked by the kernel IRQ disable,
		 *	then you need only block the sync IRQ for the
		 *	RT_LOCK area.
		 */
		ct = c->count;

		c->skb = c->skb2;
		c->count = 0;
		if (c->skb) {
			c->dptr = c->skb->data;
			c->max = c->mtu;
		} else {
			c->max = 0;
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		RT_UNLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		c->skb2 = dev_alloc_skb(c->mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		if (c->skb2 == NULL)
			netdev_warn(c->netdevice, "Memory squeeze\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 			skb_put(c->skb2, c->mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 		c->netdevice->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 		c->netdevice->stats.rx_bytes += ct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	 *	If we received a frame we must now process it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 		skb_trim(skb, ct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 		c->rx_function(c, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		c->netdevice->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 		netdev_err(c->netdevice, "Lost a frame\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) }
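
/*
 *	A minimal sketch (illustrative only, not driver code) of the
 *	double-buffer flip used in the DMA branch above. The helpers
 *	restart_rx_dma() and copy_to_skb() are hypothetical stand-ins
 *	for the set_dma_*()/enable_dma() calls and the sk_buff copy:
 *
 *		old = c->dma_num;		// buffer the hardware just filled
 *		c->dma_num ^= 1;		// point it at the other buffer
 *		restart_rx_dma(c);		// hypothetical: reprogram and
 *						// enable the Rx DMA channel
 *		copy_to_skb(c->rx_buf[old], ct);// hypothetical: drain old buffer
 *
 *	The flip and restart come first so the controller is receiving
 *	again as soon as possible; the copy then runs at leisure with
 *	the DMA lock released.
 */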
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 
/**
 *	spans_boundary - Check whether a packet can be ISA DMA'd
 *	@skb: The buffer to check
 *
 *	Returns true if the buffer crosses a 64K DMA boundary on a PC.
 *	The poor thing can only DMA within a 64K block, not across its
 *	edges.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) static inline int spans_boundary(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) {
	unsigned long a = (unsigned long)skb->data;

	a ^= (a + skb->len);
	if (a & 0x00010000)	/* If the 64K bit differs, we cross a boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) }
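
/*
 *	A worked example of the XOR trick above (illustrative only):
 *	for a buffer at address 0x0fff0 with len 0x40 the end address
 *	is 0x10030, so start ^ end = 0x1ffc0 and bit 16 (0x10000) is
 *	set - the buffer straddles a 64K page and must be bounced
 *	through the flip buffer. A buffer at 0x10000 with the same
 *	length ends at 0x10040, the XOR is 0x00040, bit 16 is clear,
 *	and it can be DMA'd in place.
 */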
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 
/**
 *	z8530_queue_xmit - Queue a packet
 *	@c: The channel to use
 *	@skb: The packet to kick down the channel
 *
 *	Queue a packet for transmission. The Z85230 has per-packet
 *	interrupt latencies that are rather hard to meet, even in DMA
 *	mode, so any copy into the DMA flip buffer is done here rather
 *	than in the IRQ handler.
 *
 *	Called from the network code. The lock is not held at this
 *	point.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	netif_stop_queue(c->netdevice);
	if (c->tx_next_skb)
		return NETDEV_TX_BUSY;

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	/* PC SPECIFIC - DMA limits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	
	/*
	 *	If the transmit will be DMA'd and the buffer lies beyond
	 *	the 16MB ISA bus limit or crosses a 64K page boundary,
	 *	copy it into the flip buffer first.
	 */

	if (c->dma_tx &&
	    ((unsigned long)virt_to_bus(skb->data + skb->len) >= 16 * 1024 * 1024 ||
	     spans_boundary(skb)))
	{
		/*
		 *	Send the flip buffer, and flip the flippy bit.
		 *	We don't care which buffer is used as long as we
		 *	never use the same one twice in a row. Since only
		 *	one buffer can be going out at a time, the other
		 *	is always safe to reuse.
		 */
		c->tx_next_ptr = c->tx_dma_buf[c->tx_dma_used];
		c->tx_dma_used ^= 1;	/* Flip temp buffer */
		skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
	}
	else
		c->tx_next_ptr = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	RT_LOCK;
	c->tx_next_skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	RT_UNLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	spin_lock_irqsave(c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	z8530_tx_begin(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	spin_unlock_irqrestore(c->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) EXPORT_SYMBOL(z8530_queue_xmit);
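
/*
 *	A minimal sketch of how a card driver typically consumes this
 *	export from its ndo_start_xmit. The names hostess_xmit() and
 *	dev_to_chan() are hypothetical stand-ins for whatever the board
 *	driver provides:
 *
 *	static netdev_tx_t hostess_xmit(struct sk_buff *skb,
 *					struct net_device *d)
 *	{
 *		return z8530_queue_xmit(dev_to_chan(d), skb);
 *	}
 *
 *	static const struct net_device_ops hostess_ops = {
 *		.ndo_start_xmit	= hostess_xmit,
 *	};
 */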
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)  *	Module support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) static const char banner[] __initconst =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) static int __init z85230_init_driver(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	printk(banner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) module_init(z85230_init_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) static void __exit z85230_cleanup_driver(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) module_exit(z85230_cleanup_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) MODULE_AUTHOR("Red Hat Inc.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) MODULE_DESCRIPTION("Z85x30 synchronous driver core");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) MODULE_LICENSE("GPL");
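
/*
 *	Build note (assumes the usual in-tree kbuild wiring): this core
 *	is built when a card driver selects CONFIG_Z85230, along the
 *	lines of the drivers/net/wan/Makefile entry:
 *
 *	obj-$(CONFIG_Z85230)	+= z85230.o
 */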