// SPDX-License-Identifier: GPL-2.0-or-later
/* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
/* PLIP: A parallel port "network" driver for Linux. */
/* This driver is for parallel port with 5-bit cable (LapLink (R) cable). */
/*
 * Authors:	Donald Becker <becker@scyld.com>
 *		Tommy Thorn <thorn@daimi.aau.dk>
 *		Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		Peter Bauer <100136.3530@compuserve.com>
 *		Niibe Yutaka <gniibe@mri.co.jp>
 *		Nimrod Zimerman <zimerman@mailandnews.com>
 *
 * Enhancements:
 *		Modularization and ifreq/ifmap support by Alan Cox.
 *		Rewritten by Niibe Yutaka.
 *		parport-sharing awareness code by Philip Blundell.
 *		SMP locking by Niibe Yutaka.
 *		Support for parallel ports with no IRQ (poll mode),
 *		Modifications to use the parallel port API
 *		by Nimrod Zimerman.
 *
 * Fixes:
 *		Niibe Yutaka
 *		  - Module initialization.
 *		  - MTU fix.
 *		  - Make sure other end is OK, before sending a packet.
 *		  - Fix immediate timer problem.
 *
 *		Al Viro
 *		  - Changed {enable,disable}_irq handling to make it work
 *		    with new ("stack") semantics.
 */

/*
 * Original version and the name 'PLIP' from Donald Becker <becker@scyld.com>
 * inspired by Russ Nelson's parallel port packet driver.
 *
 * NOTE:
 *     Tanabe Hiroyasu changed the protocol, and that version shipped in
 *     Linux v1.0.  To keep interoperability with DOS machines running the
 *     Crynwr packet driver, Peter Bauer changed the protocol back to the
 *     original one.
 *
 *     This version follows the original PLIP protocol, so it cannot
 *     communicate with the PLIP of Linux v1.0.
 */

/*
 * To use this driver with a DOS box, turn on the ARP switch:
 *	# ifconfig plip[0-2] arp
 */
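/*
 * Illustrative setup (added as an example; the interface name and the
 * 192.168.3.x addresses below are placeholders, not part of the original
 * driver documentation):
 *
 *	# ifconfig plip0 192.168.3.1 pointopoint 192.168.3.2 up
 *	# ifconfig plip0 arp		(only needed for a DOS/Crynwr peer)
 */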
static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";

/*
  Sources:
	Ideas and protocols came from Russ Nelson's <nelson@crynwr.com>
	"parallel.asm" parallel port packet driver.

  The "Crynwr" parallel port standard specifies the following protocol:
    Trigger by sending nibble '0x8' (this causes interrupt on other end)
    count-low octet
    count-high octet
    ... data octets
    checksum octet
  Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
			<wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>

  The packet is encapsulated as if it were ethernet.

  The cable used is a de facto standard parallel null cable -- sold as
  a "LapLink" cable by various places.  You'll need a 12-conductor cable to
  make one yourself.  The wiring is:
    SLCTIN	17 - 17
    GROUND	25 - 25
    D0->ERROR	2 - 15		15 - 2
    D1->SLCT	3 - 13		13 - 3
    D2->PAPOUT	4 - 12		12 - 4
    D3->ACK	5 - 10		10 - 5
    D4->BUSY	6 - 11		11 - 6
  Do not connect the other pins.  They are:
    D5,D6,D7 are 7,8,9
    STROBE is 1, FEED is 14, INIT is 16
    extra grounds are 18,19,20,21,22,23,24
*/
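/*
 * Worked example of the handshake above (added for illustration, not part
 * of the original protocol text).  To transfer the octet 0xA5 the sender
 * drives its data lines, which the crossed cable feeds into the receiver's
 * status lines:
 *
 *	send 0x10 + (0xA5 & 0x0F) = 0x15	-> peer latches the low nibble
 *						   and ACKs
 *	send 0x00 + ((0xA5 >> 4) & 0x0F) = 0x0A	-> peer latches the high nibble
 *						   and ACKs
 *
 * plip_send()/plip_receive() below implement this exchange, with bit 0x10
 * (D4, wired to the peer's BUSY input) acting as the "new nibble" strobe.
 */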

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/if_plip.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/parport.h>
#include <linux/bitops.h>

#include <net/neighbour.h>

#include <asm/irq.h>
#include <asm/byteorder.h>

/* Maximum number of devices to support. */
#define PLIP_MAX  8

/* Use 0 for production, 1 for verification, >2 for debug */
#ifndef NET_DEBUG
#define NET_DEBUG	1
#endif
static const unsigned int net_debug = NET_DEBUG;

#define ENABLE(irq)  if (irq != -1) enable_irq(irq)
#define DISABLE(irq) if (irq != -1) disable_irq(irq)

/* In microseconds */
#define PLIP_DELAY_UNIT		   1

/* Connection timeout = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
#define PLIP_TRIGGER_WAIT	 500

/* Nibble timeout = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
#define PLIP_NIBBLE_WAIT	3000
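/*
 * With the defaults above (explanatory note, not in the original source):
 * the trigger handshake is abandoned after roughly PLIP_TRIGGER_WAIT *
 * PLIP_DELAY_UNIT = 500 us of polling, and each nibble is given about
 * PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT = 3000 us = 3 ms before plip_send()/
 * plip_receive() return TIMEOUT.  These are lower bounds; every polling
 * iteration also pays the cost of the port I/O itself.
 */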

/* Bottom halves */
static void plip_kick_bh(struct work_struct *work);
static void plip_bh(struct work_struct *work);
static void plip_timer_bh(struct work_struct *work);

/* Interrupt handler */
static void plip_interrupt(void *dev_id);

/* Functions for DEV methods */
static netdev_tx_t plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
			    unsigned short type, const void *daddr,
			    const void *saddr, unsigned len);
static int plip_hard_header_cache(const struct neighbour *neigh,
				  struct hh_cache *hh, __be16 type);
static int plip_open(struct net_device *dev);
static int plip_close(struct net_device *dev);
static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int plip_preempt(void *handle);
static void plip_wakeup(void *handle);

enum plip_connection_state {
	PLIP_CN_NONE=0,
	PLIP_CN_RECEIVE,
	PLIP_CN_SEND,
	PLIP_CN_CLOSING,
	PLIP_CN_ERROR
};

enum plip_packet_state {
	PLIP_PK_DONE=0,
	PLIP_PK_TRIGGER,
	PLIP_PK_LENGTH_LSB,
	PLIP_PK_LENGTH_MSB,
	PLIP_PK_DATA,
	PLIP_PK_CHECKSUM
};

enum plip_nibble_state {
	PLIP_NB_BEGIN,
	PLIP_NB_1,
	PLIP_NB_2,
};

struct plip_local {
	enum plip_packet_state state;
	enum plip_nibble_state nibble;
	union {
		struct {
#if defined(__LITTLE_ENDIAN)
			unsigned char lsb;
			unsigned char msb;
#elif defined(__BIG_ENDIAN)
			unsigned char msb;
			unsigned char lsb;
#else
#error	"Please fix the endianness defines in <asm/byteorder.h>"
#endif
		} b;
		unsigned short h;
	} length;
	unsigned short byte;
	unsigned char  checksum;
	unsigned char  data;
	struct sk_buff *skb;
};
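/*
 * Note (added for clarity): 'length' is the 16-bit packet length.  The wire
 * protocol transfers it one octet at a time, so the state machine fills in
 * length.b.lsb and length.b.msb separately while the rest of the code reads
 * the assembled value through length.h; the #ifdefs above keep the byte
 * order of the struct in step with the host CPU.
 */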

struct net_local {
	struct net_device *dev;
	struct work_struct immediate;
	struct delayed_work deferred;
	struct delayed_work timer;
	struct plip_local snd_data;
	struct plip_local rcv_data;
	struct pardevice *pardev;
	unsigned long  trigger;
	unsigned long  nibble;
	enum plip_connection_state connection;
	unsigned short timeout_count;
	int is_deferred;
	int port_owner;
	int should_relinquish;
	spinlock_t lock;
	atomic_t kill_timer;
	struct completion killed_timer_cmp;
};
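/*
 * Note (added for clarity): the three work items double as the driver's
 * "bottom halves".  'immediate' runs plip_bh (the connection state machine),
 * 'deferred' runs plip_kick_bh to retry after a deferral, and 'timer' runs
 * plip_timer_bh, which is only initialized when the port has no IRQ and the
 * driver must poll; kill_timer/killed_timer_cmp are used to shut that
 * polling down.
 */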

static inline void enable_parport_interrupts (struct net_device *dev)
{
	if (dev->irq != -1)
	{
		struct parport *port =
		   ((struct net_local *)netdev_priv(dev))->pardev->port;
		port->ops->enable_irq (port);
	}
}

static inline void disable_parport_interrupts (struct net_device *dev)
{
	if (dev->irq != -1)
	{
		struct parport *port =
		   ((struct net_local *)netdev_priv(dev))->pardev->port;
		port->ops->disable_irq (port);
	}
}

static inline void write_data (struct net_device *dev, unsigned char data)
{
	struct parport *port =
	   ((struct net_local *)netdev_priv(dev))->pardev->port;

	port->ops->write_data (port, data);
}

static inline unsigned char read_status (struct net_device *dev)
{
	struct parport *port =
	   ((struct net_local *)netdev_priv(dev))->pardev->port;

	return port->ops->read_status (port);
}

static const struct header_ops plip_header_ops = {
	.create  = plip_hard_header,
	.cache   = plip_hard_header_cache,
};

static const struct net_device_ops plip_netdev_ops = {
	.ndo_open		 = plip_open,
	.ndo_stop		 = plip_close,
	.ndo_start_xmit		 = plip_tx_packet,
	.ndo_do_ioctl		 = plip_ioctl,
	.ndo_set_mac_address	 = eth_mac_addr,
	.ndo_validate_addr	 = eth_validate_addr,
};

/* Entry point of PLIP driver.
   Probe the hardware, and register/initialize the driver.

   PLIP is rather weird, because of the way it interacts with the parport
   system.  It is _not_ initialised from Space.c.  Instead, plip_init()
   is called, and that function makes up a "struct net_device" for each port, and
   then calls us here.

 */
static void
plip_init_netdev(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);

	/* Then, override parts of it */
	dev->tx_queue_len	 = 10;
	dev->flags		 = IFF_POINTOPOINT|IFF_NOARP;
	memset(dev->dev_addr, 0xfc, ETH_ALEN);

	dev->netdev_ops		 = &plip_netdev_ops;
	dev->header_ops		 = &plip_header_ops;


	nl->port_owner = 0;

	/* Initialize constants */
	nl->trigger	= PLIP_TRIGGER_WAIT;
	nl->nibble	= PLIP_NIBBLE_WAIT;

	/* Initialize task queue structures */
	INIT_WORK(&nl->immediate, plip_bh);
	INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);

	if (dev->irq == -1)
		INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);

	spin_lock_init(&nl->lock);
}

/* Bottom half handler for the delayed request.
   This routine runs as delayed work (nl->deferred) one jiffy after a
   transfer had to be deferred; it re-queues `plip_bh'. */
static void
plip_kick_bh(struct work_struct *work)
{
	struct net_local *nl =
		container_of(work, struct net_local, deferred.work);

	if (nl->is_deferred)
		schedule_work(&nl->immediate);
}

/* Forward declarations of internal routines */
static int plip_none(struct net_device *, struct net_local *,
		     struct plip_local *, struct plip_local *);
static int plip_receive_packet(struct net_device *, struct net_local *,
			       struct plip_local *, struct plip_local *);
static int plip_send_packet(struct net_device *, struct net_local *,
			    struct plip_local *, struct plip_local *);
static int plip_connection_close(struct net_device *, struct net_local *,
				 struct plip_local *, struct plip_local *);
static int plip_error(struct net_device *, struct net_local *,
		      struct plip_local *, struct plip_local *);
static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
				 struct plip_local *snd,
				 struct plip_local *rcv,
				 int error);

#define OK        0
#define TIMEOUT   1
#define ERROR     2
#define HS_TIMEOUT 3

typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
			 struct plip_local *snd, struct plip_local *rcv);

static const plip_func connection_state_table[] =
{
	plip_none,
	plip_receive_packet,
	plip_send_packet,
	plip_connection_close,
	plip_error
};
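/*
 * Note (added for clarity): plip_bh() indexes this table with
 * nl->connection, so the entries must stay in the same order as
 * enum plip_connection_state (PLIP_CN_NONE ... PLIP_CN_ERROR).
 */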

/* Bottom half handler of PLIP. */
static void
plip_bh(struct work_struct *work)
{
	struct net_local *nl = container_of(work, struct net_local, immediate);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;
	plip_func f;
	int r;

	nl->is_deferred = 0;
	f = connection_state_table[nl->connection];
	if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK &&
	    (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
	}
}

static void
plip_timer_bh(struct work_struct *work)
{
	struct net_local *nl =
		container_of(work, struct net_local, timer.work);

	if (!(atomic_read (&nl->kill_timer))) {
		plip_interrupt (nl->dev);

		schedule_delayed_work(&nl->timer, 1);
	}
	else {
		complete(&nl->killed_timer_cmp);
	}
}
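/*
 * Note (added for clarity): plip_timer_bh() is the poll-mode substitute for
 * the parport interrupt.  When the port has no IRQ, the open path (outside
 * this excerpt) is expected to start this work item; it then calls
 * plip_interrupt() and re-arms itself every jiffy until kill_timer is set,
 * at which point it signals killed_timer_cmp and stops rescheduling.
 */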

static int
plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv,
		      int error)
{
	unsigned char c0;
	/*
	 * This is tricky. If we got here from the beginning of send (either
	 * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
	 * already disabled. With the old variant of {enable,disable}_irq()
	 * extra disable_irq() was a no-op. Now it became mortal - it's
	 * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
	 * that is). So we have to treat HS_TIMEOUT and ERROR from send
	 * in a special way.
	 */

	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_SEND) {

		if (error != ERROR) { /* Timeout */
			nl->timeout_count++;
			if ((error == HS_TIMEOUT && nl->timeout_count <= 10) ||
			    nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
			       dev->name, snd->state, c0);
		} else
			error = HS_TIMEOUT;
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	} else if (nl->connection == PLIP_CN_RECEIVE) {
		if (rcv->state == PLIP_PK_TRIGGER) {
			/* Transmission was interrupted. */
			spin_unlock_irq(&nl->lock);
			return OK;
		}
		if (error != ERROR) { /* Timeout */
			if (++nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
			       dev->name, rcv->state, c0);
		}
		dev->stats.rx_dropped++;
	}
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	spin_unlock_irq(&nl->lock);
	if (error == HS_TIMEOUT) {
		DISABLE(dev->irq);
		synchronize_irq(dev->irq);
	}
	disable_parport_interrupts (dev);
	netif_stop_queue (dev);
	nl->connection = PLIP_CN_ERROR;
	write_data (dev, 0x00);

	return TIMEOUT;
}

static int
plip_none(struct net_device *dev, struct net_local *nl,
	  struct plip_local *snd, struct plip_local *rcv)
{
	return OK;
}

/* PLIP_RECEIVE --- receive a byte (two nibbles)
   Returns OK on success, TIMEOUT on timeout */
static inline int
plip_receive(unsigned short nibble_timeout, struct net_device *dev,
	     enum plip_nibble_state *ns_p, unsigned char *data_p)
{
	unsigned char c0, c1;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if ((c0 & 0x80) == 0) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p = (c0 >> 3) & 0x0f;
		write_data (dev, 0x10); /* send ACK */
		*ns_p = PLIP_NB_1;
		fallthrough;

	case PLIP_NB_1:
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if (c0 & 0x80) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p |= (c0 << 1) & 0xf0;
		write_data (dev, 0x00); /* send ACK */
		*ns_p = PLIP_NB_BEGIN;
		break;

	case PLIP_NB_2:
		break;
	}
	return OK;
}
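/*
 * Worked example (added for illustration; concrete status values depend on
 * the port hardware, only bits 3-7 matter here).  Suppose the peer is
 * sending the octet 0xa5:
 *
 *   low nibble  0x5: the peer raises D4 (the 0x10 strobe), which the
 *		      hardware-inverted BUSY line reports as bit 7 == 0;
 *		      bits 6..3 carry 0101, so (c0 >> 3) & 0x0f == 0x05.
 *   high nibble 0xa: the peer drops D4, bit 7 reads 1 again;
 *		      bits 6..3 carry 1010, so (c0 << 1) & 0xf0 == 0xa0.
 *
 * OR-ing the two pieces rebuilds 0xa5 in *data_p.
 */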

/*
 *	Determine the packet's protocol ID. The rule here is that we
 *	assume 802.3 if the type field is short enough to be a length.
 *	This is normal practice and works for any 'now in use' protocol.
 *
 *	PLIP is ethernet-ish but the daddr might not be valid if unicast.
 *	PLIP fortunately has no bus architecture (it's point-to-point).
 *
 *	We can't fix the daddr thing as that quirk (more bug) is embedded
 *	in far too many old systems not all even running Linux.
 */

static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	skb_reset_mac_header(skb);
	skb_pull(skb,dev->hard_header_len);
	eth = eth_hdr(skb);

	if(is_multicast_ether_addr(eth->h_dest))
	{
		if(ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
			skb->pkt_type=PACKET_BROADCAST;
		else
			skb->pkt_type=PACKET_MULTICAST;
	}

	/*
	 *	This ALLMULTI check should be redundant by 1.4
	 *	so don't forget to remove it.
	 */

	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
		return eth->h_proto;

	rawp = skb->data;

	/*
	 *	This is a magic hack to spot IPX packets. Older Novell breaks
	 *	the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 *	layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 *	won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *)rawp == 0xFFFF)
		return htons(ETH_P_802_3);

	/*
	 *	Real 802.2 LLC
	 */
	return htons(ETH_P_802_2);
}
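/*
 * Example (added for illustration): an IPv4 frame arrives with h_proto ==
 * htons(0x0800); 0x0800 >= ETH_P_802_3_MIN (0x0600), so it is returned as
 * the protocol ID unchanged.  A raw 802.3 frame carrying a length such as
 * 0x0064 falls through to the payload check: 0xFFFF there means old-style
 * IPX (ETH_P_802_3), anything else is treated as 802.2 LLC (ETH_P_802_2).
 */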

/* PLIP_RECEIVE_PACKET --- receive a packet */
static int
plip_receive_packet(struct net_device *dev, struct net_local *nl,
		    struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;

	switch (rcv->state) {
	case PLIP_PK_TRIGGER:
		DISABLE(dev->irq);
		/* Don't need to synchronize irq, as we can safely ignore it */
		disable_parport_interrupts (dev);
		write_data (dev, 0x01); /* send ACK */
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive start\n", dev->name);
		rcv->state = PLIP_PK_LENGTH_LSB;
		rcv->nibble = PLIP_NB_BEGIN;
		fallthrough;

	case PLIP_PK_LENGTH_LSB:
		if (snd->state != PLIP_PK_DONE) {
			if (plip_receive(nl->trigger, dev,
					 &rcv->nibble, &rcv->length.b.lsb)) {
				/* collision: a transmit is already pending
				   (the old dev->tbusy == 1 case), so drop
				   this receive and retry the send */
				rcv->state = PLIP_PK_DONE;
				nl->is_deferred = 1;
				nl->connection = PLIP_CN_SEND;
				schedule_delayed_work(&nl->deferred, 1);
				enable_parport_interrupts (dev);
				ENABLE(dev->irq);
				return OK;
			}
		} else {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &rcv->length.b.lsb))
				return TIMEOUT;
		}
		rcv->state = PLIP_PK_LENGTH_MSB;
		fallthrough;

	case PLIP_PK_LENGTH_MSB:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->length.b.msb))
			return TIMEOUT;
		if (rcv->length.h > dev->mtu + dev->hard_header_len ||
		    rcv->length.h < 8) {
			printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
			return ERROR;
		}
		/* Malloc up new buffer. */
		rcv->skb = dev_alloc_skb(rcv->length.h + 2);
		if (rcv->skb == NULL) {
			printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
			return ERROR;
		}
		skb_reserve(rcv->skb, 2);	/* Align IP on 16 byte boundaries */
		skb_put(rcv->skb,rcv->length.h);
		rcv->skb->dev = dev;
		rcv->state = PLIP_PK_DATA;
		rcv->byte = 0;
		rcv->checksum = 0;
		fallthrough;

	case PLIP_PK_DATA:
		lbuf = rcv->skb->data;
		do {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &lbuf[rcv->byte]))
				return TIMEOUT;
		} while (++rcv->byte < rcv->length.h);
		do {
			rcv->checksum += lbuf[--rcv->byte];
		} while (rcv->byte);
		rcv->state = PLIP_PK_CHECKSUM;
		fallthrough;

	case PLIP_PK_CHECKSUM:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->data))
			return TIMEOUT;
		if (rcv->data != rcv->checksum) {
			dev->stats.rx_crc_errors++;
			if (net_debug)
				printk(KERN_DEBUG "%s: checksum error\n", dev->name);
			return ERROR;
		}
		rcv->state = PLIP_PK_DONE;
		fallthrough;

	case PLIP_PK_DONE:
		/* Inform the upper layer of the arrival of a packet. */
		rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
		netif_rx_ni(rcv->skb);
		dev->stats.rx_bytes += rcv->length.h;
		dev->stats.rx_packets++;
		rcv->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive end\n", dev->name);

		/* Close the connection. */
		write_data (dev, 0x00);
		spin_lock_irq(&nl->lock);
		if (snd->state != PLIP_PK_DONE) {
			nl->connection = PLIP_CN_SEND;
			spin_unlock_irq(&nl->lock);
			schedule_work(&nl->immediate);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		} else {
			nl->connection = PLIP_CN_NONE;
			spin_unlock_irq(&nl->lock);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		}
	}
	return OK;
}
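/*
 * Note (added for clarity): the "checksum" octet is simply the 8-bit sum of
 * every data byte in the frame.  The receive path above accumulates it by
 * walking rcv->byte back down to zero after the data has arrived, and the
 * final octet from the wire must match it exactly or the frame is dropped
 * and counted as an rx_crc_error.
 */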

/* PLIP_SEND --- send a byte (two nibbles)
   Returns OK on success, TIMEOUT on timeout */
static inline int
plip_send(unsigned short nibble_timeout, struct net_device *dev,
	  enum plip_nibble_state *ns_p, unsigned char data)
{
	unsigned char c0;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		write_data (dev, data & 0x0f);
		*ns_p = PLIP_NB_1;
		fallthrough;

	case PLIP_NB_1:
		write_data (dev, 0x10 | (data & 0x0f));
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if ((c0 & 0x80) == 0)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		write_data (dev, 0x10 | (data >> 4));
		*ns_p = PLIP_NB_2;
		fallthrough;

	case PLIP_NB_2:
		write_data (dev, (data >> 4));
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if (c0 & 0x80)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		*ns_p = PLIP_NB_BEGIN;
		return OK;
	}
	return OK;
}
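/*
 * Worked example (added for illustration): plip_send() transfers the octet
 * 0xa5 by writing 0x05, then 0x15 (low nibble plus the 0x10 strobe),
 * waiting for the peer's ACK on the BUSY status bit, then 0x1a and finally
 * 0x0a (high nibble, strobe dropped), waiting for BUSY to swing back.  This
 * is the sender's half of the exchange that plip_receive() above decodes.
 */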

/* PLIP_SEND_PACKET --- send a packet */
static int
plip_send_packet(struct net_device *dev, struct net_local *nl,
		 struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;
	unsigned char c0;
	unsigned int cx;

	if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
		printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
		snd->state = PLIP_PK_DONE;
		snd->skb = NULL;
		return ERROR;
	}

	switch (snd->state) {
	case PLIP_PK_TRIGGER:
		if ((read_status(dev) & 0xf8) != 0x80)
			return HS_TIMEOUT;

		/* Trigger remote rx interrupt. */
		write_data (dev, 0x08);
		cx = nl->trigger;
		while (1) {
			udelay(PLIP_DELAY_UNIT);
			spin_lock_irq(&nl->lock);
			if (nl->connection == PLIP_CN_RECEIVE) {
				spin_unlock_irq(&nl->lock);
				/* Interrupted. */
				dev->stats.collisions++;
				return OK;
			}
			c0 = read_status(dev);
			if (c0 & 0x08) {
				spin_unlock_irq(&nl->lock);
				DISABLE(dev->irq);
				synchronize_irq(dev->irq);
				if (nl->connection == PLIP_CN_RECEIVE) {
					/* Interrupted.
					   We don't need to enable irq,
					   as it is soon disabled. */
					/* Yes, we do. New variant of
					   {enable,disable}_irq *counts*
					   them.  -- AV  */
					ENABLE(dev->irq);
					dev->stats.collisions++;
					return OK;
				}
				disable_parport_interrupts (dev);
				if (net_debug > 2)
					printk(KERN_DEBUG "%s: send start\n", dev->name);
				snd->state = PLIP_PK_LENGTH_LSB;
				snd->nibble = PLIP_NB_BEGIN;
				nl->timeout_count = 0;
				break;
			}
			spin_unlock_irq(&nl->lock);
			if (--cx == 0) {
				write_data (dev, 0x00);
				return HS_TIMEOUT;
			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) case PLIP_PK_LENGTH_LSB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) if (plip_send(nibble_timeout, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) &snd->nibble, snd->length.b.lsb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) return TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) snd->state = PLIP_PK_LENGTH_MSB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) case PLIP_PK_LENGTH_MSB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (plip_send(nibble_timeout, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) &snd->nibble, snd->length.b.msb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) return TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) snd->state = PLIP_PK_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) snd->byte = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) snd->checksum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) case PLIP_PK_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (plip_send(nibble_timeout, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) &snd->nibble, lbuf[snd->byte]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) return TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) } while (++snd->byte < snd->length.h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) snd->checksum += lbuf[--snd->byte];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) } while (snd->byte);
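		/* The checksum is the plain 8-bit sum of every payload byte;
		   the receiving side recomputes it and compares. */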
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) snd->state = PLIP_PK_CHECKSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) case PLIP_PK_CHECKSUM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (plip_send(nibble_timeout, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) &snd->nibble, snd->checksum))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) return TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) dev->stats.tx_bytes += snd->skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) dev_kfree_skb(snd->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) dev->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) snd->state = PLIP_PK_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) case PLIP_PK_DONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) /* Close the connection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) write_data (dev, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) snd->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (net_debug > 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) printk(KERN_DEBUG "%s: send end\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) nl->connection = PLIP_CN_CLOSING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) nl->is_deferred = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) schedule_delayed_work(&nl->deferred, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) enable_parport_interrupts (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) ENABLE(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) return OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) return OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) plip_connection_close(struct net_device *dev, struct net_local *nl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) struct plip_local *snd, struct plip_local *rcv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) spin_lock_irq(&nl->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (nl->connection == PLIP_CN_CLOSING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) nl->connection = PLIP_CN_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) netif_wake_queue (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) spin_unlock_irq(&nl->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (nl->should_relinquish) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) nl->should_relinquish = nl->port_owner = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) parport_release(nl->pardev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) return OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
/* PLIP_ERROR --- wait until the other end has settled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) plip_error(struct net_device *dev, struct net_local *nl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) struct plip_local *snd, struct plip_local *rcv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) unsigned char status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) status = read_status(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if ((status & 0xf8) == 0x80) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (net_debug > 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) nl->connection = PLIP_CN_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) nl->should_relinquish = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) netif_start_queue (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) enable_parport_interrupts (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) ENABLE(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) netif_wake_queue (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) nl->is_deferred = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) schedule_delayed_work(&nl->deferred, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) /* Handle the parallel port interrupts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) plip_interrupt(void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) struct net_device *dev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) struct net_local *nl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) struct plip_local *rcv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) unsigned char c0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) nl = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) rcv = &nl->rcv_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) spin_lock_irqsave (&nl->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) c0 = read_status(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if ((c0 & 0xf8) != 0xc0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if ((dev->irq != -1) && (net_debug > 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) spin_unlock_irqrestore (&nl->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (net_debug > 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) printk(KERN_DEBUG "%s: interrupt.\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) switch (nl->connection) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) case PLIP_CN_CLOSING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) netif_wake_queue (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) case PLIP_CN_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) case PLIP_CN_SEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) rcv->state = PLIP_PK_TRIGGER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) nl->connection = PLIP_CN_RECEIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) nl->timeout_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) schedule_work(&nl->immediate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) case PLIP_CN_RECEIVE:
		/* May occur because there is a race condition
		   around the test and set of dev->interrupt.
		   Ignore this interrupt. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) case PLIP_CN_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) spin_unlock_irqrestore(&nl->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) static netdev_tx_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) struct net_local *nl = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) struct plip_local *snd = &nl->snd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (netif_queue_stopped(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) /* We may need to grab the bus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (!nl->port_owner) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (parport_claim(nl->pardev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) nl->port_owner = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) netif_stop_queue (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (skb->len > dev->mtu + dev->hard_header_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) netif_start_queue (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (net_debug > 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) printk(KERN_DEBUG "%s: send request\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) spin_lock_irq(&nl->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) snd->skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) snd->length.h = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) snd->state = PLIP_PK_TRIGGER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (nl->connection == PLIP_CN_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) nl->connection = PLIP_CN_SEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) nl->timeout_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) schedule_work(&nl->immediate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) spin_unlock_irq(&nl->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) const struct in_device *in_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) in_dev = __in_dev_get_rcu(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (in_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) /* Any address will do - we take the first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (ifa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) memset(eth->h_dest, 0xfc, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
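/*
 * Worked example (hypothetical addresses): with plip0 configured as local
 * 192.168.0.1 and point-to-point peer 192.168.0.2, the Ethernet-style
 * header built by eth_header() is rewritten so that
 *
 *	h_source = fc:fc:c0:a8:00:01	(0xfc 0xfc + local IPv4 address)
 *	h_dest   = fc:fc:c0:a8:00:02	(0xfc 0xfc + ifa_address, i.e. the
 *					 peer address on a point-to-point link)
 *
 * Both pseudo MAC addresses are thus derived from IPv4 addresses, which is
 * what the DOS-compatible framing expects.
 */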
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) plip_hard_header(struct sk_buff *skb, struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) unsigned short type, const void *daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) const void *saddr, unsigned len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) ret = eth_header(skb, dev, type, daddr, saddr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (ret >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) plip_rewrite_address (dev, (struct ethhdr *)skb->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) static int plip_hard_header_cache(const struct neighbour *neigh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) struct hh_cache *hh, __be16 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) ret = eth_header_cache(neigh, hh, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) struct ethhdr *eth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) eth = (struct ethhdr*)(((u8*)hh->hh_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) HH_DATA_OFF(sizeof(*eth)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) plip_rewrite_address (neigh->dev, eth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
/* Open/initialize the board.  This is called (in the current kernel)
   sometime after booting when the 'ifconfig' program is run.

   This routine gets exclusive access to the parallel port by claiming
   it from the parport layer (parport_claim()), then enables the receive
   interrupt (or starts the polling timer on IRQ-less ports).
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) plip_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) struct net_local *nl = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) struct in_device *in_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) /* Grab the port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (!nl->port_owner) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (parport_claim(nl->pardev)) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) nl->port_owner = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) nl->should_relinquish = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) /* Clear the data port. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) write_data (dev, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) /* Enable rx interrupt. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) enable_parport_interrupts (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (dev->irq == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) atomic_set (&nl->kill_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) schedule_delayed_work(&nl->timer, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) /* Initialize the state machine. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) nl->rcv_data.skb = nl->snd_data.skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) nl->connection = PLIP_CN_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) nl->is_deferred = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) /* Fill in the MAC-level header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) We used to abuse dev->broadcast to store the point-to-point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) MAC address, but we no longer do it. Instead, we fetch the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) interface address whenever it is needed, which is cheap enough
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) because we use the hh_cache. Actually, abusing dev->broadcast
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) didn't work, because when using plip_open the point-to-point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) address isn't yet known.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) PLIP doesn't have a real MAC address, but we need it to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) DOS compatible, and to properly support taps (otherwise,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) when the device address isn't identical to the address of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) received frame, the kernel incorrectly drops it). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) in_dev=__in_dev_get_rtnl(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (in_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) /* Any address will do - we take the first. We already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) have the first two bytes filled with 0xfc, from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) plip_init_dev(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if (ifa != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
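	/* For instance (hypothetical address): a local address of 10.0.0.1
	   gives dev_addr = fc:fc:0a:00:00:01, so incoming frames carrying
	   that destination match the device address and are not dropped. */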
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) netif_start_queue (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /* The inverse routine to plip_open (). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) plip_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) struct net_local *nl = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) struct plip_local *snd = &nl->snd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) struct plip_local *rcv = &nl->rcv_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) netif_stop_queue (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) DISABLE(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) synchronize_irq(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (dev->irq == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) init_completion(&nl->killed_timer_cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) atomic_set (&nl->kill_timer, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) wait_for_completion(&nl->killed_timer_cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) #ifdef NOTDEF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) outb(0x00, PAR_DATA(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) nl->is_deferred = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) nl->connection = PLIP_CN_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (nl->port_owner) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) parport_release(nl->pardev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) nl->port_owner = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) snd->state = PLIP_PK_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (snd->skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) dev_kfree_skb(snd->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) snd->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) rcv->state = PLIP_PK_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) if (rcv->skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) kfree_skb(rcv->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) rcv->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) #ifdef NOTDEF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) /* Reset. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) outb(0x00, PAR_CONTROL(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) plip_preempt(void *handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) struct net_device *dev = (struct net_device *)handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) struct net_local *nl = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) /* Stand our ground if a datagram is on the wire */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if (nl->connection != PLIP_CN_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) nl->should_relinquish = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) nl->port_owner = 0; /* Remember that we released the bus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) plip_wakeup(void *handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) struct net_device *dev = (struct net_device *)handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) struct net_local *nl = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (nl->port_owner) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) /* Why are we being woken up? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (!parport_claim(nl->pardev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) /* bus_owner is already set (but why?) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (!(dev->flags & IFF_UP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) /* Don't need the port when the interface is down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (!parport_claim(nl->pardev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) nl->port_owner = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) /* Clear the data port. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) write_data (dev, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) struct net_local *nl = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (cmd != SIOCDEVPLIP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) switch(pc->pcmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) case PLIP_GET_TIMEOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) pc->trigger = nl->trigger;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) pc->nibble = nl->nibble;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) case PLIP_SET_TIMEOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if(!capable(CAP_NET_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) nl->trigger = pc->trigger;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) nl->nibble = pc->nibble;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
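/*
 * Userspace usage sketch (hypothetical program, not part of this driver):
 * read the current handshake timeouts with PLIP_GET_TIMEOUT, then write
 * them back doubled with PLIP_SET_TIMEOUT (needs CAP_NET_ADMIN).  Any
 * AF_INET socket will do as the ioctl target; "plip0" is an assumed name.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/if_plip.h>
 *
 *	int main(void)
 *	{
 *		struct ifreq ifr;
 *		struct plipconf *pc = (struct plipconf *)&ifr.ifr_ifru;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "plip0", IFNAMSIZ - 1);
 *		pc->pcmd = PLIP_GET_TIMEOUT;
 *		if (fd < 0 || ioctl(fd, SIOCDEVPLIP, &ifr) < 0)
 *			return 1;
 *		printf("nibble=%lu trigger=%lu\n", pc->nibble, pc->trigger);
 *
 *		pc->pcmd = PLIP_SET_TIMEOUT;
 *		pc->nibble  *= 2;
 *		pc->trigger *= 2;
 *		return ioctl(fd, SIOCDEVPLIP, &ifr) < 0;
 *	}
 */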
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) static int timid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) module_param_array(parport, int, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) module_param(timid, int, 0);
MODULE_PARM_DESC(parport, "List of parport device numbers for plip to use");
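
/*
 * Loading example (hedged; the port numbers depend on what parport has
 * detected on the local machine):
 *
 *	modprobe plip parport=0,2
 *
 * binds plip only to parport0 and parport2.  With no "parport=" list the
 * driver attaches to every available port, unless "timid=1" is also given,
 * in which case ports that already have other devices registered are left
 * alone.
 */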
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) plip_searchfor(int list[], int a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if (list[i] == a) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) /* plip_attach() is called (by the parport code) when a port is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) * available to use. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) static void plip_attach (struct parport *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) static int unit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) struct net_local *nl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) char name[IFNAMSIZ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) struct pardev_cb plip_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if ((parport[0] == -1 && (!timid || !port->devices)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) plip_searchfor(parport, port->number)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (unit == PLIP_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) printk(KERN_ERR "plip: too many devices\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) sprintf(name, "plip%d", unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) dev = alloc_etherdev(sizeof(struct net_local));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) strcpy(dev->name, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) dev->irq = port->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) dev->base_addr = port->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) if (port->irq == -1) {
			printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode, "
					 "which is fairly inefficient!\n", port->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) nl = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) nl->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) memset(&plip_cb, 0, sizeof(plip_cb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) plip_cb.private = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) plip_cb.preempt = plip_preempt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) plip_cb.wakeup = plip_wakeup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) plip_cb.irq_func = plip_interrupt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) nl->pardev = parport_register_dev_model(port, dev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) &plip_cb, unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (!nl->pardev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) printk(KERN_ERR "%s: parport_register failed\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) goto err_free_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) plip_init_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (register_netdev(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) printk(KERN_ERR "%s: network register failed\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) goto err_parport_unregister;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) printk(KERN_INFO "%s", version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (dev->irq != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) printk(KERN_INFO "%s: Parallel port at %#3lx, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) "using IRQ %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) dev->name, dev->base_addr, dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) printk(KERN_INFO "%s: Parallel port at %#3lx, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) "not using IRQ.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) dev->name, dev->base_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) dev_plip[unit++] = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) err_parport_unregister:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) parport_unregister_device(nl->pardev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) err_free_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) /* plip_detach() is called (by the parport code) when a port is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) * no longer available to use. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) static void plip_detach (struct parport *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) /* Nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) static int plip_probe(struct pardevice *par_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) struct device_driver *drv = par_dev->dev.driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) int len = strlen(drv->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (strncmp(par_dev->name, drv->name, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) static struct parport_driver plip_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) .name = "plip",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) .probe = plip_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) .match_port = plip_attach,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) .detach = plip_detach,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) .devmodel = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) static void __exit plip_cleanup_module (void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) for (i=0; i < PLIP_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if ((dev = dev_plip[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) struct net_local *nl = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) unregister_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) if (nl->port_owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) parport_release(nl->pardev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) parport_unregister_device(nl->pardev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) dev_plip[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) parport_unregister_driver(&plip_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) #ifndef MODULE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) static int parport_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) static int __init plip_setup(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) int ints[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) str = get_options(str, ARRAY_SIZE(ints), ints);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) /* Ugh. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) if (!strncmp(str, "parport", 7)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) int n = simple_strtoul(str+7, NULL, 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) if (parport_ptr < PLIP_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) parport[parport_ptr++] = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) printk(KERN_INFO "plip: too many ports, %s ignored.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) } else if (!strcmp(str, "timid")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) timid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) if (ints[0] == 0 || ints[1] == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) /* disable driver on "plip=" or "plip=0" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) parport[0] = -2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) ints[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) __setup("plip=", plip_setup);
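
/*
 * Built-in (non-modular) examples, assuming a detected parport0:
 *
 *	plip=parport0	restrict the driver to that port
 *	plip=timid	skip ports that already have other devices
 *	plip=0		disable the driver entirely (a bare "plip=" works too)
 */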
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) #endif /* !MODULE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) static int __init plip_init (void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if (parport[0] == -2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) if (parport[0] != -1 && timid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) timid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) if (parport_register_driver (&plip_driver)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) printk (KERN_WARNING "plip: couldn't register driver\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) module_init(plip_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) module_exit(plip_cleanup_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) MODULE_LICENSE("GPL");