Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 source tree for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * wanXL serial card driver for Linux
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * host part
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * Status:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  *   - Only DTE (external clock) support with NRZ and NRZI encodings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  *   - wanXL100 will require minor driver modifications, no access to hw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/fcntl.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <linux/ioport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #include <linux/hdlc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) #include "wanxl.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) static const char* version = "wanXL serial card driver version: 0.48";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) #define PLX_CTL_RESET   0x40000000 /* adapter reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) #undef DEBUG_PKT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) #undef DEBUG_PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) /* MAILBOX #1 - PUTS COMMANDS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) #define MBX1_CMD_ABORTJ 0x85000000 /* Abort and Jump */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) #ifdef __LITTLE_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) #define MBX1_CMD_BSWAP  0x8C000001 /* little-endian Byte Swap Mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) #define MBX1_CMD_BSWAP  0x8C000000 /* big-endian Byte Swap Mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) /* MAILBOX #2 - DRAM SIZE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) #define MBX2_MEMSZ_MASK 0xFFFF0000 /* PUTS Memory Size Register mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) struct port {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 	struct card *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 	spinlock_t lock;	/* for wanxl_xmit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58)         int node;		/* physical port #0 - 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 	unsigned int clock_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 	int tx_in, tx_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 	struct sk_buff *tx_skbs[TX_BUFFERS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) struct card_status {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 	desc_t rx_descs[RX_QUEUE_LENGTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 	port_status_t port_status[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) struct card {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 	int n_ports;		/* 1, 2 or 4 ports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	u8 irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	u8 __iomem *plx;	/* PLX PCI9060 virtual base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 	struct pci_dev *pdev;	/* for pci_name(pdev) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 	int rx_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 	struct sk_buff *rx_skbs[RX_QUEUE_LENGTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 	struct card_status *status;	/* shared between host and card */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	dma_addr_t status_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	struct port ports[];	/* 1 - 4 port structures follow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) static inline struct port *dev_to_port(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	return (struct port *)dev_to_hdlc(dev)->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) static inline port_status_t *get_status(struct port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 	return &port->card->status->port_status[port->node];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) #ifdef DEBUG_PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) static inline dma_addr_t pci_map_single_debug(struct pci_dev *pdev, void *ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 					      size_t size, int direction)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	dma_addr_t addr = dma_map_single(&pdev->dev, ptr, size, direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 	if (addr + size > 0x100000000LL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 		pr_crit("%s: pci_map_single() returned memory at 0x%llx!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 			pci_name(pdev), (unsigned long long)addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 	return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) #undef pci_map_single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) #define pci_map_single pci_map_single_debug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) /* Cable and/or personality module change interrupt service */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) static inline void wanxl_cable_intr(struct port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 	u32 value = get_status(port)->cable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	int valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	const char *cable, *pm, *dte = "", *dsr = "", *dcd = "";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	switch(value & 0x7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 	case STATUS_CABLE_V35: cable = "V.35"; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 	case STATUS_CABLE_X21: cable = "X.21"; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 	case STATUS_CABLE_V24: cable = "V.24"; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	case STATUS_CABLE_EIA530: cable = "EIA530"; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	case STATUS_CABLE_NONE: cable = "no"; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 	default: cable = "invalid";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	switch((value >> STATUS_CABLE_PM_SHIFT) & 0x7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 	case STATUS_CABLE_V35: pm = "V.35"; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 	case STATUS_CABLE_X21: pm = "X.21"; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	case STATUS_CABLE_V24: pm = "V.24"; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	case STATUS_CABLE_EIA530: pm = "EIA530"; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 	case STATUS_CABLE_NONE: pm = "no personality"; valid = 0; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	default: pm = "invalid personality"; valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 	if (valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 		if ((value & 7) == ((value >> STATUS_CABLE_PM_SHIFT) & 7)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 			dsr = (value & STATUS_CABLE_DSR) ? ", DSR ON" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 				", DSR off";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 			dcd = (value & STATUS_CABLE_DCD) ? ", carrier ON" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 				", carrier off";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 		dte = (value & STATUS_CABLE_DCE) ? " DCE" : " DTE";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	netdev_info(port->dev, "%s%s module, %s cable%s%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 		    pm, dte, cable, dsr, dcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	if (value & STATUS_CABLE_DCD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 		netif_carrier_on(port->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 		netif_carrier_off(port->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) /* Transmit complete interrupt service */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) static inline void wanxl_tx_intr(struct port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	struct net_device *dev = port->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)                 desc_t *desc = &get_status(port)->tx_descs[port->tx_in];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 		struct sk_buff *skb = port->tx_skbs[port->tx_in];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 		switch (desc->stat) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 		case PACKET_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 		case PACKET_EMPTY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 			netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 		case PACKET_UNDERRUN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 			dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 			dev->stats.tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 			dev->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 			dev->stats.tx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)                 desc->stat = PACKET_EMPTY; /* Free descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 		dma_unmap_single(&port->card->pdev->dev, desc->address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 				 skb->len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 		dev_consume_skb_irq(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)                 port->tx_in = (port->tx_in + 1) % TX_BUFFERS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)         }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) /* Receive complete interrupt service */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) static inline void wanxl_rx_intr(struct card *card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 	desc_t *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 	while (desc = &card->status->rx_descs[card->rx_in],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 	       desc->stat != PACKET_EMPTY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 		if ((desc->stat & PACKET_PORT_MASK) > card->n_ports)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 			pr_crit("%s: received packet for nonexistent port\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 				pci_name(card->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 			struct sk_buff *skb = card->rx_skbs[card->rx_in];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 			struct port *port = &card->ports[desc->stat &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 						    PACKET_PORT_MASK];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 			struct net_device *dev = port->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 			if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 				dev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 			else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 				dma_unmap_single(&card->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 						 desc->address, BUFFER_LENGTH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 						 DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 				skb_put(skb, desc->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) #ifdef DEBUG_PKT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 				printk(KERN_DEBUG "%s RX(%i):", dev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 				       skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 				debug_frame(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 				dev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 				dev->stats.rx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 				skb->protocol = hdlc_type_trans(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 				netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 				skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 			if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 				skb = dev_alloc_skb(BUFFER_LENGTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 				desc->address = skb ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 					dma_map_single(&card->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 						       skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 						       BUFFER_LENGTH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 						       DMA_FROM_DEVICE) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 				card->rx_skbs[card->rx_in] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 		desc->stat = PACKET_EMPTY; /* Free descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 		card->rx_in = (card->rx_in + 1) % RX_QUEUE_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) static irqreturn_t wanxl_intr(int irq, void* dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 	struct card *card = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)         int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)         u32 stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)         int handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)         while((stat = readl(card->plx + PLX_DOORBELL_FROM_CARD)) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)                 handled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 		writel(stat, card->plx + PLX_DOORBELL_FROM_CARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)                 for (i = 0; i < card->n_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 			if (stat & (1 << (DOORBELL_FROM_CARD_TX_0 + i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 				wanxl_tx_intr(&card->ports[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 			if (stat & (1 << (DOORBELL_FROM_CARD_CABLE_0 + i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 				wanxl_cable_intr(&card->ports[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 		if (stat & (1 << DOORBELL_FROM_CARD_RX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 			wanxl_rx_intr(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264)         }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)         return IRQ_RETVAL(handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 	struct port *port = dev_to_port(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 	desc_t *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)         spin_lock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 	desc = &get_status(port)->tx_descs[port->tx_out];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)         if (desc->stat != PACKET_EMPTY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)                 /* should never happen - previous xmit should stop queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) #ifdef DEBUG_PKT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)                 printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 		netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 		spin_unlock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 		return NETDEV_TX_BUSY;       /* request packet to be queued */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) #ifdef DEBUG_PKT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 	printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 	debug_frame(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 	port->tx_skbs[port->tx_out] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 	desc->address = dma_map_single(&port->card->pdev->dev, skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 				       skb->len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 	desc->length = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 	desc->stat = PACKET_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 	writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 	       port->card->plx + PLX_DOORBELL_TO_CARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 	port->tx_out = (port->tx_out + 1) % TX_BUFFERS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 	if (get_status(port)->tx_descs[port->tx_out].stat != PACKET_EMPTY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 		netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) #ifdef DEBUG_PKT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 		printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 	spin_unlock(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 	return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) static int wanxl_attach(struct net_device *dev, unsigned short encoding,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 			unsigned short parity)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 	struct port *port = dev_to_port(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 	if (encoding != ENCODING_NRZ &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 	    encoding != ENCODING_NRZI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 	if (parity != PARITY_NONE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 	    parity != PARITY_CRC32_PR1_CCITT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 	    parity != PARITY_CRC16_PR1_CCITT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 	    parity != PARITY_CRC32_PR0_CCITT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 	    parity != PARITY_CRC16_PR0_CCITT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 	get_status(port)->encoding = encoding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 	get_status(port)->parity = parity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 	const size_t size = sizeof(sync_serial_settings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 	sync_serial_settings line;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 	struct port *port = dev_to_port(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 	if (cmd != SIOCWANDEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 		return hdlc_ioctl(dev, ifr, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 	switch (ifr->ifr_settings.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 	case IF_GET_IFACE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 		if (ifr->ifr_settings.size < size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 			ifr->ifr_settings.size = size; /* data size wanted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 			return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 		memset(&line, 0, sizeof(line));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 		line.clock_type = get_status(port)->clocking;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 		line.clock_rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 		line.loopback = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 	case IF_IFACE_SYNC_SERIAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 		if (!capable(CAP_NET_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 			return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 		if (dev->flags & IFF_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 			return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 		if (copy_from_user(&line, ifr->ifr_settings.ifs_ifsu.sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 				   size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 		if (line.clock_type != CLOCK_EXT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 		    line.clock_type != CLOCK_TXFROMRX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 			return -EINVAL; /* No such clock setting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 		if (line.loopback != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 		get_status(port)->clocking = line.clock_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 		return hdlc_ioctl(dev, ifr, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)         }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) static int wanxl_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 	struct port *port = dev_to_port(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 	u8 __iomem *dbr = port->card->plx + PLX_DOORBELL_TO_CARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 	unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 	if (get_status(port)->open) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 		netdev_err(dev, "port already open\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 	if ((i = hdlc_open(dev)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 		return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 	port->tx_in = port->tx_out = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 	for (i = 0; i < TX_BUFFERS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 		get_status(port)->tx_descs[i].stat = PACKET_EMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 	/* signal the card */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 	writel(1 << (DOORBELL_TO_CARD_OPEN_0 + port->node), dbr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 	timeout = jiffies + HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 		if (get_status(port)->open) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 			netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 	} while (time_after(timeout, jiffies));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 	netdev_err(dev, "unable to open port\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 	/* ask the card to close the port, should it be still alive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 	writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node), dbr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 	return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) static int wanxl_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 	struct port *port = dev_to_port(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 	unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 	hdlc_close(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 	/* signal the card */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 	writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 	       port->card->plx + PLX_DOORBELL_TO_CARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 	timeout = jiffies + HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 		if (!get_status(port)->open)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 	} while (time_after(timeout, jiffies));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 	if (get_status(port)->open)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 		netdev_err(dev, "unable to close port\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 	netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 	for (i = 0; i < TX_BUFFERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 		desc_t *desc = &get_status(port)->tx_descs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 		if (desc->stat != PACKET_EMPTY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 			desc->stat = PACKET_EMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 			dma_unmap_single(&port->card->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 					 desc->address, port->tx_skbs[i]->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 					 DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 			dev_kfree_skb(port->tx_skbs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) static struct net_device_stats *wanxl_get_stats(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 	struct port *port = dev_to_port(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 	dev->stats.rx_over_errors = get_status(port)->rx_overruns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 	dev->stats.rx_frame_errors = get_status(port)->rx_frame_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 	dev->stats.rx_errors = dev->stats.rx_over_errors +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 		dev->stats.rx_frame_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 	return &dev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) static int wanxl_puts_command(struct card *card, u32 cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 	unsigned long timeout = jiffies + 5 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 	writel(cmd, card->plx + PLX_MAILBOX_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 		if (readl(card->plx + PLX_MAILBOX_1) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 		schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 	}while (time_after(timeout, jiffies));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 	return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) static void wanxl_reset(struct card *card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 	u32 old_value = readl(card->plx + PLX_CONTROL) & ~PLX_CTL_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 	writel(0x80, card->plx + PLX_MAILBOX_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 	writel(old_value | PLX_CTL_RESET, card->plx + PLX_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 	readl(card->plx + PLX_CONTROL); /* wait for posted write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 	udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 	writel(old_value, card->plx + PLX_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 	readl(card->plx + PLX_CONTROL); /* wait for posted write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) static void wanxl_pci_remove_one(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 	struct card *card = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 	for (i = 0; i < card->n_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 		unregister_hdlc_device(card->ports[i].dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 		free_netdev(card->ports[i].dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 	/* unregister and free all host resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 	if (card->irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 		free_irq(card->irq, card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 	wanxl_reset(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 	for (i = 0; i < RX_QUEUE_LENGTH; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 		if (card->rx_skbs[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 			dma_unmap_single(&card->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 					 card->status->rx_descs[i].address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 					 BUFFER_LENGTH, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 			dev_kfree_skb(card->rx_skbs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 	if (card->plx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 		iounmap(card->plx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 	if (card->status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 		dma_free_coherent(&pdev->dev, sizeof(struct card_status),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 				  card->status, card->status_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 	pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 	pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 	kfree(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) #include "wanxlfw.inc"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) static const struct net_device_ops wanxl_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 	.ndo_open       = wanxl_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 	.ndo_stop       = wanxl_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 	.ndo_start_xmit = hdlc_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 	.ndo_do_ioctl   = wanxl_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 	.ndo_get_stats  = wanxl_get_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) static int wanxl_pci_init_one(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 			      const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 	struct card *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 	u32 ramsize, stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 	unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 	u32 plx_phy;		/* PLX PCI base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 	u32 mem_phy;		/* memory PCI base addr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 	u8 __iomem *mem;	/* memory virtual base addr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 	int i, ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) #ifndef MODULE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 	pr_info_once("%s\n", version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 	i = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 	if (i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 		return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 	/* QUICC can only access first 256 MB of host RAM directly,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 	   but PLX9060 DMA does 32-bits for actual packet data transfers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 	/* FIXME when PCI/DMA subsystems are fixed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 	   We set both dma_mask and consistent_dma_mask to 28 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 	   and pray pci_alloc_consistent() will use this info. It should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 	   work on most platforms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(28)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(28))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 		pr_err("No usable DMA configuration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 		pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 	i = pci_request_regions(pdev, "wanXL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 	if (i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 		pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 		return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 	switch (pdev->device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 	case PCI_DEVICE_ID_SBE_WANXL100: ports = 1; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 	case PCI_DEVICE_ID_SBE_WANXL200: ports = 2; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 	default: ports = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 	card = kzalloc(struct_size(card, ports, ports), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 	if (card == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 		pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 		pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 		return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 	pci_set_drvdata(pdev, card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 	card->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 	card->status = dma_alloc_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 					  sizeof(struct card_status),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 					  &card->status_address, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 	if (card->status == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 		wanxl_pci_remove_one(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 		return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) #ifdef DEBUG_PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 	printk(KERN_DEBUG "wanXL %s: pci_alloc_consistent() returned memory"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 	       " at 0x%LX\n", pci_name(pdev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 	       (unsigned long long)card->status_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 	/* FIXME when PCI/DMA subsystems are fixed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 	   We set both dma_mask and consistent_dma_mask back to 32 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 	   to indicate the card can do 32-bit DMA addressing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 		pr_err("No usable DMA configuration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 		wanxl_pci_remove_one(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 	/* set up PLX mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 	plx_phy = pci_resource_start(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 	card->plx = ioremap(plx_phy, 0x70);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 	if (!card->plx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 		pr_err("ioremap() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)  		wanxl_pci_remove_one(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) #if RESET_WHILE_LOADING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 	wanxl_reset(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 	timeout = jiffies + 20 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 	while ((stat = readl(card->plx + PLX_MAILBOX_0)) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 		if (time_before(timeout, jiffies)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 			pr_warn("%s: timeout waiting for PUTS to complete\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 				pci_name(pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 			wanxl_pci_remove_one(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 			return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 		switch(stat & 0xC0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 		case 0x00:	/* hmm - PUTS completed with non-zero code? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 		case 0x80:	/* PUTS still testing the hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 			pr_warn("%s: PUTS test 0x%X failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 				pci_name(pdev), stat & 0x30);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 			wanxl_pci_remove_one(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 			return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 		schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 	/* get on-board memory size (PUTS detects no more than 4 MB) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 	ramsize = readl(card->plx + PLX_MAILBOX_2) & MBX2_MEMSZ_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 	/* set up on-board RAM mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 	mem_phy = pci_resource_start(pdev, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 	/* sanity check the board's reported memory size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 	if (ramsize < BUFFERS_ADDR +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 	    (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 		pr_warn("%s: no enough on-board RAM (%u bytes detected, %u bytes required)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 			pci_name(pdev), ramsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 			BUFFERS_ADDR +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 			(TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 		wanxl_pci_remove_one(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 	if (wanxl_puts_command(card, MBX1_CMD_BSWAP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 		pr_warn("%s: unable to Set Byte Swap Mode\n", pci_name(pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 		wanxl_pci_remove_one(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 	for (i = 0; i < RX_QUEUE_LENGTH; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 		struct sk_buff *skb = dev_alloc_skb(BUFFER_LENGTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 		card->rx_skbs[i] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 		if (skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 			card->status->rx_descs[i].address =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 				dma_map_single(&card->pdev->dev, skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 					       BUFFER_LENGTH, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 	mem = ioremap(mem_phy, PDM_OFFSET + sizeof(firmware));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 	if (!mem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 		pr_err("ioremap() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)  		wanxl_pci_remove_one(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) 	for (i = 0; i < sizeof(firmware); i += 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 		writel(ntohl(*(__be32*)(firmware + i)), mem + PDM_OFFSET + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 	for (i = 0; i < ports; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 		writel(card->status_address +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 		       (void *)&card->status->port_status[i] -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 		       (void *)card->status, mem + PDM_OFFSET + 4 + i * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 	writel(card->status_address, mem + PDM_OFFSET + 20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 	writel(PDM_OFFSET, mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 	iounmap(mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 	writel(0, card->plx + PLX_MAILBOX_5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 	if (wanxl_puts_command(card, MBX1_CMD_ABORTJ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 		pr_warn("%s: unable to Abort and Jump\n", pci_name(pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) 		wanxl_pci_remove_one(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 	timeout = jiffies + 5 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 		if ((stat = readl(card->plx + PLX_MAILBOX_5)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 		schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 	}while (time_after(timeout, jiffies));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 	if (!stat) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 		pr_warn("%s: timeout while initializing card firmware\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 			pci_name(pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 		wanxl_pci_remove_one(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) #if DETECT_RAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 	ramsize = stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 	pr_info("%s: at 0x%X, %u KB of RAM at 0x%X, irq %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) 		pci_name(pdev), plx_phy, ramsize / 1024, mem_phy, pdev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 	/* Allocate IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 	if (request_irq(pdev->irq, wanxl_intr, IRQF_SHARED, "wanXL", card)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 		pr_warn("%s: could not allocate IRQ%i\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) 			pci_name(pdev), pdev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) 		wanxl_pci_remove_one(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 	card->irq = pdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 	for (i = 0; i < ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 		hdlc_device *hdlc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 		struct port *port = &card->ports[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 		struct net_device *dev = alloc_hdlcdev(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 		if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 			pr_err("%s: unable to allocate memory\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 			       pci_name(pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 			wanxl_pci_remove_one(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 		port->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 		hdlc = dev_to_hdlc(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 		spin_lock_init(&port->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 		dev->tx_queue_len = 50;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 		dev->netdev_ops = &wanxl_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 		hdlc->attach = wanxl_attach;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 		hdlc->xmit = wanxl_xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 		port->card = card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 		port->node = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 		get_status(port)->clocking = CLOCK_EXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 		if (register_hdlc_device(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 			pr_err("%s: unable to register hdlc device\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 			       pci_name(pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 			free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 			wanxl_pci_remove_one(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 			return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 		card->n_ports++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 	pr_info("%s: port", pci_name(pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 	for (i = 0; i < ports; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 		pr_cont("%s #%i: %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 			i ? "," : "", i, card->ports[i].dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 	pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 	for (i = 0; i < ports; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 		wanxl_cable_intr(&card->ports[i]); /* get carrier status etc.*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) static const struct pci_device_id wanxl_pci_tbl[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 	{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) 	  PCI_ANY_ID, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 	{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 	  PCI_ANY_ID, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 	{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL400, PCI_ANY_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 	  PCI_ANY_ID, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 	{ 0, }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) static struct pci_driver wanxl_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 	.name		= "wanXL",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 	.id_table	= wanxl_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 	.probe		= wanxl_pci_init_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) 	.remove		= wanxl_pci_remove_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) static int __init wanxl_init_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) #ifdef MODULE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 	pr_info("%s\n", version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) 	return pci_register_driver(&wanxl_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) static void __exit wanxl_cleanup_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 	pci_unregister_driver(&wanxl_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) MODULE_DESCRIPTION("SBE Inc. wanXL serial port driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) MODULE_DEVICE_TABLE(pci, wanxl_pci_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) module_init(wanxl_init_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) module_exit(wanxl_cleanup_module);