// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for high-speed SCC boards (those with DMA support)
 * Copyright (C) 1997-2000 Klaus Kudielka
 *
 * S5SCC/DMA support by Janko Koleznik S52HI
 */


#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <net/ax25.h>
#include "z8530.h"


/* Number of buffers per channel */

#define NUM_TX_BUF	2	/* NUM_TX_BUF >= 1 (min. 2 recommended) */
#define NUM_RX_BUF	6	/* NUM_RX_BUF >= 1 (min. 2 recommended) */
#define BUF_SIZE	1576	/* BUF_SIZE >= mtu + hard_header_len */
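/*
 * A worked check of the constraint above: dev_setup() below uses an MTU
 * of 1500 and hard_header_len = AX25_MAX_HEADER_LEN, so BUF_SIZE (1576)
 * must remain at least 1500 + AX25_MAX_HEADER_LEN for those defaults.
 */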


/* Cards supported */

#define HW_PI		{ "Ottawa PI", 0x300, 0x20, 0x10, 8, \
			  0, 8, 1843200, 3686400 }
#define HW_PI2		{ "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
			  0, 8, 3686400, 7372800 }
#define HW_TWIN		{ "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
			  0, 4, 6144000, 6144000 }
#define HW_S5		{ "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
			  0, 8, 4915200, 9830400 }

#define HARDWARE	{ HW_PI, HW_PI2, HW_TWIN, HW_S5 }
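/*
 * The initializers above follow the field order of struct scc_hardware
 * declared further down: name, io_region, io_delta, io_size, num_devs,
 * scc_offset, tmr_offset, tmr_hz, pclk_hz.
 */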

#define TMR_0_HZ	25600	/* Frequency of timer 0 */

#define TYPE_PI		0
#define TYPE_PI2	1
#define TYPE_TWIN	2
#define TYPE_S5		3
#define NUM_TYPES	4

#define MAX_NUM_DEVS	32


/* SCC chips supported */

#define Z8530		0
#define Z85C30		1
#define Z85230		2

#define CHIPNAMES	{ "Z8530", "Z85C30", "Z85230" }


/* I/O registers */

/* 8530 registers relative to card base */
#define SCCB_CMD	0x00
#define SCCB_DATA	0x01
#define SCCA_CMD	0x02
#define SCCA_DATA	0x03

/* 8253/8254 registers relative to card base */
#define TMR_CNT0	0x00
#define TMR_CNT1	0x01
#define TMR_CNT2	0x02
#define TMR_CTRL	0x03

/* Additional PI/PI2 registers relative to card base */
#define PI_DREQ_MASK	0x04

/* Additional PackeTwin registers relative to card base */
#define TWIN_INT_REG	0x08
#define TWIN_CLR_TMR1	0x09
#define TWIN_CLR_TMR2	0x0a
#define TWIN_SPARE_1	0x0b
#define TWIN_DMA_CFG	0x08
#define TWIN_SERIAL_CFG	0x09
#define TWIN_DMA_CLR_FF	0x0a
#define TWIN_SPARE_2	0x0b
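/*
 * Note that offsets 0x08-0x0b appear twice: on the PackeTwin the same
 * address apparently selects a different register on read (INT_REG,
 * CLR_TMR1, CLR_TMR2, SPARE_1) than on write (DMA_CFG, SERIAL_CFG,
 * DMA_CLR_FF, SPARE_2), which matches how the rest of the driver only
 * ever reads the former and writes the latter.
 */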


/* PackeTwin I/O register values */

/* INT_REG */
#define TWIN_SCC_MSK	0x01
#define TWIN_TMR1_MSK	0x02
#define TWIN_TMR2_MSK	0x04
#define TWIN_INT_MSK	0x07

/* SERIAL_CFG */
#define TWIN_DTRA_ON	0x01
#define TWIN_DTRB_ON	0x02
#define TWIN_EXTCLKA	0x04
#define TWIN_EXTCLKB	0x08
#define TWIN_LOOPA_ON	0x10
#define TWIN_LOOPB_ON	0x20
#define TWIN_EI		0x80

/* DMA_CFG */
#define TWIN_DMA_HDX_T1		0x08
#define TWIN_DMA_HDX_R1		0x0a
#define TWIN_DMA_HDX_T3		0x14
#define TWIN_DMA_HDX_R3		0x16
#define TWIN_DMA_FDX_T3R1	0x1b
#define TWIN_DMA_FDX_T1R3	0x1d


/* Status values */

#define IDLE		0
#define TX_HEAD		1
#define TX_DATA		2
#define TX_PAUSE	3
#define TX_TAIL		4
#define RTS_OFF		5
#define WAIT		6
#define DCD_ON		7
#define RX_ON		8
#define DCD_OFF		9
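/*
 * These values live in scc_priv.state and appear to track the
 * half-duplex keying sequence of a channel: transmit head flags, data,
 * pause, tail, RTS release, then waiting on the channel and receiving
 * (DCD on/off).
 */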


/* Ioctls */

#define SIOCGSCCPARAM	SIOCDEVPRIVATE
#define SIOCSSCCPARAM	(SIOCDEVPRIVATE+1)


/* Data types */

struct scc_param {
	int pclk_hz;		/* frequency of BRG input (don't change) */
	int brg_tc;		/* BRG terminal count; BRG disabled if < 0 */
	int nrzi;		/* 0 (nrz), 1 (nrzi) */
	int clocks;		/* see dmascc_cfg documentation */
	int txdelay;		/* [1/TMR_0_HZ] */
	int txtimeout;		/* [1/HZ] */
	int txtail;		/* [1/TMR_0_HZ] */
	int waittime;		/* [1/TMR_0_HZ] */
	int slottime;		/* [1/TMR_0_HZ] */
	int persist;		/* 1 ... 256 */
	int dma;		/* -1 (disable), 0, 1, 3 */
	int txpause;		/* [1/TMR_0_HZ] */
	int rtsoff;		/* [1/TMR_0_HZ] */
	int dcdon;		/* [1/TMR_0_HZ] */
	int dcdoff;		/* [1/TMR_0_HZ] */
};

struct scc_hardware {
	char *name;
	int io_region;
	int io_delta;
	int io_size;
	int num_devs;
	int scc_offset;
	int tmr_offset;
	int tmr_hz;
	int pclk_hz;
};

struct scc_priv {
	int type;
	int chip;
	struct net_device *dev;
	struct scc_info *info;

	int channel;
	int card_base, scc_cmd, scc_data;
	int tmr_cnt, tmr_ctrl, tmr_mode;
	struct scc_param param;
	char rx_buf[NUM_RX_BUF][BUF_SIZE];
	int rx_len[NUM_RX_BUF];
	int rx_ptr;
	struct work_struct rx_work;
	int rx_head, rx_tail, rx_count;
	int rx_over;
	char tx_buf[NUM_TX_BUF][BUF_SIZE];
	int tx_len[NUM_TX_BUF];
	int tx_ptr;
	int tx_head, tx_tail, tx_count;
	int state;
	unsigned long tx_start;
	int rr0;
	spinlock_t *register_lock;	/* Per scc_info */
	spinlock_t ring_lock;
};
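/*
 * rx_buf[]/tx_buf[] appear to be used as simple ring buffers: the
 * head/tail/count triples track which slots are being filled and
 * drained, and rx_ptr/tx_ptr the byte position within the slot
 * currently being transferred.  scc_open() resets all of them to zero.
 */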

struct scc_info {
	int irq_used;
	int twin_serial_cfg;
	struct net_device *dev[2];
	struct scc_priv priv[2];
	struct scc_info *next;
	spinlock_t register_lock;	/* Per device register lock */
};


/* Function declarations */
static int setup_adapter(int card_base, int type, int n) __init;

static void write_scc(struct scc_priv *priv, int reg, int val);
static void write_scc_data(struct scc_priv *priv, int val, int fast);
static int read_scc(struct scc_priv *priv, int reg);
static int read_scc_data(struct scc_priv *priv);

static int scc_open(struct net_device *dev);
static int scc_close(struct net_device *dev);
static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
static int scc_set_mac_address(struct net_device *dev, void *sa);

static inline void tx_on(struct scc_priv *priv);
static inline void rx_on(struct scc_priv *priv);
static inline void rx_off(struct scc_priv *priv);
static void start_timer(struct scc_priv *priv, int t, int r15);
static inline unsigned char random(void);

static inline void z8530_isr(struct scc_info *info);
static irqreturn_t scc_isr(int irq, void *dev_id);
static void rx_isr(struct scc_priv *priv);
static void special_condition(struct scc_priv *priv, int rc);
static void rx_bh(struct work_struct *);
static void tx_isr(struct scc_priv *priv);
static void es_isr(struct scc_priv *priv);
static void tm_isr(struct scc_priv *priv);


/* Initialization variables */

static int io[MAX_NUM_DEVS] __initdata = { 0, };

/* Beware! hw[] is also used in dmascc_exit(). */
static struct scc_hardware hw[NUM_TYPES] = HARDWARE;


/* Global variables */

static struct scc_info *first;
static unsigned long rand;
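/*
 * 'rand' holds the state of the driver's small pseudo-random generator:
 * it is seeded from jiffies in dmascc_init() and consumed by random(),
 * presumably for the p-persistence/slot-time back-off configured through
 * scc_param.persist and scc_param.slottime.
 */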


MODULE_AUTHOR("Klaus Kudielka");
MODULE_DESCRIPTION("Driver for high-speed SCC boards");
module_param_hw_array(io, int, ioport, NULL, 0);
MODULE_LICENSE("GPL");

static void __exit dmascc_exit(void)
{
	int i;
	struct scc_info *info;

	while (first) {
		info = first;

		/* Unregister devices */
		for (i = 0; i < 2; i++)
			unregister_netdev(info->dev[i]);

		/* Reset board */
		if (info->priv[0].type == TYPE_TWIN)
			outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
		write_scc(&info->priv[0], R9, FHWRES);
		release_region(info->dev[0]->base_addr,
			       hw[info->priv[0].type].io_size);

		for (i = 0; i < 2; i++)
			free_netdev(info->dev[i]);

		/* Free memory */
		first = info->next;
		kfree(info);
	}
}

static int __init dmascc_init(void)
{
	int h, i, j, n;
	int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
	    t1[MAX_NUM_DEVS];
	unsigned t_val;
	unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
	    counting[MAX_NUM_DEVS];

	/* Initialize random number generator */
	rand = jiffies;
	/* Cards found = 0 */
	n = 0;
	/* Warning message */
	if (!io[0])
		printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");

	/* Run autodetection for each card type */
	for (h = 0; h < NUM_TYPES; h++) {

		if (io[0]) {
			/* User-specified I/O address regions */
			for (i = 0; i < hw[h].num_devs; i++)
				base[i] = 0;
			for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
				j = (io[i] - hw[h].io_region) / hw[h].io_delta;
				if (j >= 0 && j < hw[h].num_devs &&
				    hw[h].io_region + j * hw[h].io_delta == io[i]) {
					base[j] = io[i];
				}
			}
		} else {
			/* Default I/O address regions */
			for (i = 0; i < hw[h].num_devs; i++) {
				base[i] = hw[h].io_region + i * hw[h].io_delta;
			}
		}

		/* Check valid I/O address regions */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				if (!request_region(base[i], hw[h].io_size, "dmascc"))
					base[i] = 0;
				else {
					tcmd[i] = base[i] + hw[h].tmr_offset + TMR_CTRL;
					t0[i] = base[i] + hw[h].tmr_offset + TMR_CNT0;
					t1[i] = base[i] + hw[h].tmr_offset + TMR_CNT1;
				}
			}

		/* Start timers */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				/* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
				outb(0x36, tcmd[i]);
				outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF, t0[i]);
				outb((hw[h].tmr_hz / TMR_0_HZ) >> 8, t0[i]);
				/* Timer 1: LSB+MSB, Mode 0, HZ/10 */
				outb(0x70, tcmd[i]);
				outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
				outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
				start[i] = jiffies;
				delay[i] = 0;
				counting[i] = 1;
				/* Timer 2: LSB+MSB, Mode 0 */
				outb(0xb0, tcmd[i]);
			}
		time = jiffies;
		/* Wait until counter registers are loaded */
		udelay(2000000 / TMR_0_HZ);

		/* Timing loop */
		while (jiffies - time < 13) {
			for (i = 0; i < hw[h].num_devs; i++)
				if (base[i] && counting[i]) {
					/* Read back Timer 1: latch; read LSB; read MSB */
					outb(0x40, tcmd[i]);
					t_val = inb(t1[i]) + (inb(t1[i]) << 8);
					/* Also check whether counter did wrap */
					if (t_val == 0 ||
					    t_val > TMR_0_HZ / HZ * 10)
						counting[i] = 0;
					delay[i] = jiffies - start[i];
				}
		}

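		/*
		 * Detection criterion: timer 1 was loaded with
		 * TMR_0_HZ / HZ * 10 counts, so on a real board (where it
		 * is evidently clocked at TMR_0_HZ) it reaches zero after
		 * about ten jiffies.  A measured delay of 9..11 jiffies is
		 * therefore taken to mean an adapter answered at this
		 * address.
		 */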
		/* Evaluate measurements */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				if ((delay[i] >= 9 && delay[i] <= 11) &&
				    /* Ok, we have found an adapter */
				    (setup_adapter(base[i], h, n) == 0))
					n++;
				else
					release_region(base[i], hw[h].io_size);
			}

	}			/* NUM_TYPES */

	/* If any adapter was successfully initialized, return ok */
	if (n)
		return 0;

	/* If no adapter found, return error */
	printk(KERN_INFO "dmascc: no adapters found\n");
	return -EIO;
}

module_init(dmascc_init);
module_exit(dmascc_exit);

static void __init dev_setup(struct net_device *dev)
{
	dev->type = ARPHRD_AX25;
	dev->hard_header_len = AX25_MAX_HEADER_LEN;
	dev->mtu = 1500;
	dev->addr_len = AX25_ADDR_LEN;
	dev->tx_queue_len = 64;
	memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
	memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
}

static const struct net_device_ops scc_netdev_ops = {
	.ndo_open = scc_open,
	.ndo_stop = scc_close,
	.ndo_start_xmit = scc_send_packet,
	.ndo_do_ioctl = scc_ioctl,
	.ndo_set_mac_address = scc_set_mac_address,
};

static int __init setup_adapter(int card_base, int type, int n)
{
	int i, irq, chip, err;
	struct scc_info *info;
	struct net_device *dev;
	struct scc_priv *priv;
	unsigned long time;
	unsigned int irqs;
	int tmr_base = card_base + hw[type].tmr_offset;
	int scc_base = card_base + hw[type].scc_offset;
	char *chipnames[] = CHIPNAMES;

	/* Initialize what is necessary for write_scc and write_scc_data */
	info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
	if (!info) {
		err = -ENOMEM;
		goto out;
	}

	info->dev[0] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
	if (!info->dev[0]) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		err = -ENOMEM;
		goto out1;
	}

	info->dev[1] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
	if (!info->dev[1]) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		err = -ENOMEM;
		goto out2;
	}
	spin_lock_init(&info->register_lock);

	priv = &info->priv[0];
	priv->type = type;
	priv->card_base = card_base;
	priv->scc_cmd = scc_base + SCCA_CMD;
	priv->scc_data = scc_base + SCCA_DATA;
	priv->register_lock = &info->register_lock;

	/* Reset SCC */
	write_scc(priv, R9, FHWRES | MIE | NV);

	/* Determine type of chip by enabling SDLC/HDLC enhancements */
	write_scc(priv, R15, SHDLCE);
	if (!read_scc(priv, R15)) {
		/* WR7' not present. This is an ordinary Z8530 SCC. */
		chip = Z8530;
	} else {
		/* Put one character in TX FIFO */
		write_scc_data(priv, 0, 0);
		if (read_scc(priv, R0) & Tx_BUF_EMP) {
			/* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
			chip = Z85230;
		} else {
			/* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
			chip = Z85C30;
		}
	}
	write_scc(priv, R15, 0);

	/* Start IRQ auto-detection */
	irqs = probe_irq_on();

	/* Enable interrupts */
	if (type == TYPE_TWIN) {
		outb(0, card_base + TWIN_DMA_CFG);
		inb(card_base + TWIN_CLR_TMR1);
		inb(card_base + TWIN_CLR_TMR2);
		info->twin_serial_cfg = TWIN_EI;
		outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
	} else {
		write_scc(priv, R15, CTSIE);
		write_scc(priv, R0, RES_EXT_INT);
		write_scc(priv, R1, EXT_INT_ENAB);
	}

	/* Start timer */
	outb(1, tmr_base + TMR_CNT1);
	outb(0, tmr_base + TMR_CNT1);

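	/*
	 * Timer 1 was just loaded with a count of 1 (LSB, then MSB), so it
	 * expires after a single tick of its input clock and should raise
	 * the interrupt enabled above; after a short wait, probe_irq_off()
	 * reports which IRQ line it arrived on.
	 */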
	/* Wait and detect IRQ */
	time = jiffies;
	while (jiffies - time < 2 + HZ / TMR_0_HZ);
	irq = probe_irq_off(irqs);

	/* Clear pending interrupt, disable interrupts */
	if (type == TYPE_TWIN) {
		inb(card_base + TWIN_CLR_TMR1);
	} else {
		write_scc(priv, R1, 0);
		write_scc(priv, R15, 0);
		write_scc(priv, R0, RES_EXT_INT);
	}

	if (irq <= 0) {
		printk(KERN_ERR
		       "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
		       hw[type].name, card_base, irq);
		err = -ENODEV;
		goto out3;
	}

	/* Set up data structures */
	for (i = 0; i < 2; i++) {
		dev = info->dev[i];
		priv = &info->priv[i];
		priv->type = type;
		priv->chip = chip;
		priv->dev = dev;
		priv->info = info;
		priv->channel = i;
		spin_lock_init(&priv->ring_lock);
		priv->register_lock = &info->register_lock;
		priv->card_base = card_base;
		priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
		priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
		priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
		priv->tmr_ctrl = tmr_base + TMR_CTRL;
		priv->tmr_mode = i ? 0xb0 : 0x70;
		priv->param.pclk_hz = hw[type].pclk_hz;
		priv->param.brg_tc = -1;
		priv->param.clocks = TCTRxCP | RCRTxCP;
		priv->param.persist = 256;
		priv->param.dma = -1;
		INIT_WORK(&priv->rx_work, rx_bh);
		dev->ml_priv = priv;
		snprintf(dev->name, sizeof(dev->name), "dmascc%i", 2 * n + i);
		dev->base_addr = card_base;
		dev->irq = irq;
		dev->netdev_ops = &scc_netdev_ops;
		dev->header_ops = &ax25_header_ops;
	}
	if (register_netdev(info->dev[0])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[0]->name);
		err = -ENODEV;
		goto out3;
	}
	if (register_netdev(info->dev[1])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[1]->name);
		err = -ENODEV;
		goto out4;
	}


	info->next = first;
	first = info;
	printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
	       hw[type].name, chipnames[chip], card_base, irq);
	return 0;

out4:
	unregister_netdev(info->dev[0]);
out3:
	if (info->priv[0].type == TYPE_TWIN)
		outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
	write_scc(&info->priv[0], R9, FHWRES);
	free_netdev(info->dev[1]);
out2:
	free_netdev(info->dev[0]);
out1:
	kfree(info);
out:
	return err;
}


/* Driver functions */

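/*
 * Low-level SCC register accessors.  In the default (PI/PI2) cases below
 * the card's DMA request is gated off (outb of 0 to PI_DREQ_MASK) around
 * every programmed-I/O access and re-enabled (outb of 1) afterwards,
 * under the shared register lock, presumably so that a DMA transfer in
 * progress cannot collide with the register access.
 */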
static void write_scc(struct scc_priv *priv, int reg, int val)
{
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		if (reg)
			outb(reg, priv->scc_cmd);
		outb(val, priv->scc_cmd);
		return;
	case TYPE_TWIN:
		if (reg)
			outb_p(reg, priv->scc_cmd);
		outb_p(val, priv->scc_cmd);
		return;
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		if (reg)
			outb_p(reg, priv->scc_cmd);
		outb_p(val, priv->scc_cmd);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return;
	}
}


static void write_scc_data(struct scc_priv *priv, int val, int fast)
{
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		outb(val, priv->scc_data);
		return;
	case TYPE_TWIN:
		outb_p(val, priv->scc_data);
		return;
	default:
		if (fast)
			outb_p(val, priv->scc_data);
		else {
			spin_lock_irqsave(priv->register_lock, flags);
			outb_p(0, priv->card_base + PI_DREQ_MASK);
			outb_p(val, priv->scc_data);
			outb(1, priv->card_base + PI_DREQ_MASK);
			spin_unlock_irqrestore(priv->register_lock, flags);
		}
		return;
	}
}


static int read_scc(struct scc_priv *priv, int reg)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		if (reg)
			outb(reg, priv->scc_cmd);
		return inb(priv->scc_cmd);
	case TYPE_TWIN:
		if (reg)
			outb_p(reg, priv->scc_cmd);
		return inb_p(priv->scc_cmd);
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		if (reg)
			outb_p(reg, priv->scc_cmd);
		rc = inb_p(priv->scc_cmd);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}


static int read_scc_data(struct scc_priv *priv)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		return inb(priv->scc_data);
	case TYPE_TWIN:
		return inb_p(priv->scc_data);
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		rc = inb_p(priv->scc_data);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}


static int scc_open(struct net_device *dev)
{
	struct scc_priv *priv = dev->ml_priv;
	struct scc_info *info = priv->info;
	int card_base = priv->card_base;

	/* Request IRQ if not already used by other channel */
	if (!info->irq_used) {
		if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
			return -EAGAIN;
		}
	}
	info->irq_used++;

	/* Request DMA if required */
	if (priv->param.dma >= 0) {
		if (request_dma(priv->param.dma, "dmascc")) {
			if (--info->irq_used == 0)
				free_irq(dev->irq, info);
			return -EAGAIN;
		} else {
			unsigned long flags = claim_dma_lock();
			clear_dma_ff(priv->param.dma);
			release_dma_lock(flags);
		}
	}

	/* Initialize local variables */
	priv->rx_ptr = 0;
	priv->rx_over = 0;
	priv->rx_head = priv->rx_tail = priv->rx_count = 0;
	priv->state = IDLE;
	priv->tx_head = priv->tx_tail = priv->tx_count = 0;
	priv->tx_ptr = 0;

	/* Reset channel */
	write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
	/* X1 clock, SDLC mode */
	write_scc(priv, R4, SDLC | X1CLK);
	/* DMA */
	write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
	/* 8 bit RX char, RX disable */
	write_scc(priv, R3, Rx8);
	/* 8 bit TX char, TX disable */
	write_scc(priv, R5, Tx8);
	/* SDLC address field */
	write_scc(priv, R6, 0);
	/* SDLC flag */
	write_scc(priv, R7, FLAG);
	switch (priv->chip) {
	case Z85C30:
		/* Select WR7' */
		write_scc(priv, R15, SHDLCE);
		/* Auto EOM reset */
		write_scc(priv, R7, AUTOEOM);
		write_scc(priv, R15, 0);
		break;
	case Z85230:
		/* Select WR7' */
		write_scc(priv, R15, SHDLCE);
		/* The following bits are set (see 2.5.2.1):
		   - Automatic EOM reset
		   - Interrupt request if RX FIFO is half full
		     According to the documentation this bit should be ignored
		     in DMA mode, but in practice it is not: the receiver does
		     not work if it is set.  We therefore clear it in DMA mode.
		   - Interrupt/DMA request if TX FIFO is completely empty
		     a) If set, the ESCC behaves as if it had no TX FIFO
		        (Z85C30 compatibility).
		     b) If cleared, DMA requests may follow each other very
		        quickly, filling up the TX FIFO.
		        Advantage: TX works even in case of high bus latency.
		        Disadvantage: Edge-triggered DMA request circuitry may
		        miss a request.  No more data is delivered, resulting
		        in a TX FIFO underrun.
		   Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE
		   cleared.  The PackeTwin doesn't.  I don't know about the
		   PI, but let's assume it behaves like the PI2.
		 */
		if (priv->param.dma >= 0) {
			if (priv->type == TYPE_TWIN)
				write_scc(priv, R7, AUTOEOM | TXFIFOE);
			else
				write_scc(priv, R7, AUTOEOM);
		} else {
			write_scc(priv, R7, AUTOEOM | RXFIFOH);
		}
		write_scc(priv, R15, 0);
		break;
	}
	/* Preset CRC, NRZ(I) encoding */
	write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));

	/* Configure baud rate generator */
	if (priv->param.brg_tc >= 0) {
		/* Program BR generator */
		write_scc(priv, R12, priv->param.brg_tc & 0xFF);
		write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
		/* BRG source = SYS CLK; enable BRG; DTR REQ function (required
		   by PackeTwin, not connected on the PI2); set DPLL source to
		   BRG */
		write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
		/* Enable DPLL */
		write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
	} else {
		/* Disable BR generator */
		write_scc(priv, R14, DTRREQ | BRSRC);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) /* Configure clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (priv->type == TYPE_TWIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) /* Disable external TX clock receiver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) outb((info->twin_serial_cfg &=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) card_base + TWIN_SERIAL_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) write_scc(priv, R11, priv->param.clocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) /* Enable external TX clock receiver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) outb((info->twin_serial_cfg |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) card_base + TWIN_SERIAL_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) /* Configure PackeTwin */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (priv->type == TYPE_TWIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) /* Assert DTR, enable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) outb((info->twin_serial_cfg |= TWIN_EI |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) card_base + TWIN_SERIAL_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) /* Read current status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) priv->rr0 = read_scc(priv, R0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) /* Enable DCD interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) write_scc(priv, R15, DCDIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
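/* Counterpart to scc_open: stop the TX queue, drop DTR on the PackeTwin,
   reset the SCC channel, and release the DMA channel and (when the last
   channel of the board is closed) the IRQ. */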
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) static int scc_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) struct scc_priv *priv = dev->ml_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) struct scc_info *info = priv->info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) int card_base = priv->card_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (priv->type == TYPE_TWIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) /* Drop DTR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) outb((info->twin_serial_cfg &=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) card_base + TWIN_SERIAL_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) /* Reset channel, free DMA and IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (priv->param.dma >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (priv->type == TYPE_TWIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) outb(0, card_base + TWIN_DMA_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) free_dma(priv->param.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (--info->irq_used == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) free_irq(dev->irq, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
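/* Private ioctls: SIOCGSCCPARAM copies the current struct scc_param to user
   space, SIOCSSCCPARAM replaces it (CAP_NET_ADMIN only, and only while the
   interface is down; the new values take effect on the next scc_open).

   Minimal user-space sketch (illustrative only; it assumes a hypothetical
   tool that shares this driver's struct scc_param and SIOC[GS]SCCPARAM
   numbers, an AF_INET socket fd, and an interface name such as "dmascc0"):

	struct ifreq ifr;
	struct scc_param param;

	strncpy(ifr.ifr_name, "dmascc0", IFNAMSIZ);
	ifr.ifr_data = (char *) &param;
	if (ioctl(fd, SIOCGSCCPARAM, &ifr) == 0) {
		param.txdelay += 10;
		ioctl(fd, SIOCSSCCPARAM, &ifr);
	}
*/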
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) struct scc_priv *priv = dev->ml_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) case SIOCGSCCPARAM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (copy_to_user(ifr->ifr_data, &priv->param,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) sizeof(struct scc_param)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) case SIOCSSCCPARAM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (!capable(CAP_NET_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (copy_from_user(&priv->param, ifr->ifr_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) sizeof(struct scc_param)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
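/* Transmit routine (ndo_start_xmit). IP packets are handed back to
   ax25_ip_xmit(). For AX.25 frames the leading control byte added by the
   AX.25 stack is skipped (it is re-created as a zero on receive in rx_bh())
   and the rest is copied into the next free TX ring buffer. The queue stays
   stopped while all NUM_TX_BUF buffers are in use; if the transmitter was
   idle, RTS is asserted and the TX-delay timer is started. */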
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) struct scc_priv *priv = dev->ml_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (skb->protocol == htons(ETH_P_IP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return ax25_ip_xmit(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) /* Temporarily stop the scheduler feeding us packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) /* Transfer data to DMA buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) i = priv->tx_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) priv->tx_len[i] = skb->len - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) /* Disable interrupts locally while we touch our circular buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) spin_lock_irqsave(&priv->ring_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) /* Move the ring buffer's head */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) priv->tx_head = (i + 1) % NUM_TX_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) priv->tx_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) /* If we just filled up the last buffer, leave queue stopped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) The higher layers must wait until we have a DMA buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) to accept the data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if (priv->tx_count < NUM_TX_BUF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) /* Set new TX state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (priv->state == IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) /* Assert RTS, start timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) priv->state = TX_HEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) priv->tx_start = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) write_scc(priv, R15, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) start_timer(priv, priv->param.txdelay, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /* Turn interrupts back on and free buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) spin_unlock_irqrestore(&priv->ring_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
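/* Set the device address (the AX.25 callsign). */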
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) static int scc_set_mac_address(struct net_device *dev, void *sa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) dev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
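/* Start transmitting the frame at tx_tail. In DMA mode the first byte(s)
   (three on a Z85230, one otherwise) are written by hand, the DMA controller
   is pointed at the remainder of the buffer, and the TX underrun/EOM
   interrupt is enabled so that es_isr() can detect the end of the frame.
   Without DMA, TX interrupts are enabled and tx_isr() fills the FIFO
   directly. */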
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) static inline void tx_on(struct scc_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) int i, n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (priv->param.dma >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) n = (priv->chip == Z85230) ? 3 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) /* Program DMA controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) flags = claim_dma_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) set_dma_addr(priv->param.dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) (int) priv->tx_buf[priv->tx_tail] + n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) set_dma_count(priv->param.dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) priv->tx_len[priv->tx_tail] - n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) release_dma_lock(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) /* Enable TX underrun interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) write_scc(priv, R15, TxUIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) /* Configure DREQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (priv->type == TYPE_TWIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) outb((priv->param.dma == 1) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) priv->card_base + TWIN_DMA_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) write_scc(priv, R1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) EXT_INT_ENAB | WT_FN_RDYFN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) WT_RDY_ENAB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) /* Write first byte(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) spin_lock_irqsave(priv->register_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) for (i = 0; i < n; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) write_scc_data(priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) priv->tx_buf[priv->tx_tail][i], 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) enable_dma(priv->param.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) spin_unlock_irqrestore(priv->register_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) write_scc(priv, R15, TxUIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) write_scc(priv, R1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) tx_isr(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) /* Reset EOM latch if we do not have the AUTOEOM feature */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (priv->chip == Z8530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) write_scc(priv, R0, RES_EOM_L);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
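/* Enable the receiver. The RX FIFO is drained first; in DMA mode the DMA
   channel is set up to fill the buffer at rx_head and only special condition
   interrupts are taken, otherwise every received character is fetched by
   rx_isr(). */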
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) static inline void rx_on(struct scc_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) /* Clear RX FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) while (read_scc(priv, R0) & Rx_CH_AV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) read_scc_data(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) priv->rx_over = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (priv->param.dma >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) /* Program DMA controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) flags = claim_dma_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) set_dma_mode(priv->param.dma, DMA_MODE_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) set_dma_addr(priv->param.dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) (int) priv->rx_buf[priv->rx_head]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) set_dma_count(priv->param.dma, BUF_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) release_dma_lock(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) enable_dma(priv->param.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) /* Configure PackeTwin DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (priv->type == TYPE_TWIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) outb((priv->param.dma == 1) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) priv->card_base + TWIN_DMA_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) /* Special condition interrupts only, external interrupts enabled, RX DMA request enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) /* Reset current frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) priv->rx_ptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) /* Interrupt on every Rx character and special condition, external interrupts enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) WT_FN_RDYFN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) write_scc(priv, R0, ERR_RES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
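/* Disable the receiver and its DREQ / RX interrupt sources. */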
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) static inline void rx_off(struct scc_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) /* Disable receiver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) write_scc(priv, R3, Rx8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) /* Disable DREQ / RX interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) outb(0, priv->card_base + TWIN_DMA_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) /* Disable DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (priv->param.dma >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) disable_dma(priv->param.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
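/* Arm the board timer. A count of 0 calls tm_isr() directly; a positive
   count is loaded into the 16-bit counter (LSB first). On cards other than
   the PackeTwin the timer output is seen on the SCC's CTS input, so CTSIE is
   enabled and the expiry arrives as a CTS transition (see es_isr()). */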
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) static void start_timer(struct scc_priv *priv, int t, int r15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) outb(priv->tmr_mode, priv->tmr_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (t == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) tm_isr(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) } else if (t > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) outb(t & 0xFF, priv->tmr_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) outb((t >> 8) & 0xFF, priv->tmr_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (priv->type != TYPE_TWIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) write_scc(priv, R15, r15 | CTSIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) priv->rr0 |= CTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
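/* Pseudo-random byte generator, used by tm_isr() for p-persistence: after
   the DCD-off debounce the driver waits random() / persist slot times before
   transmitting a queued frame. Illustrative example (assumed values, not
   defaults): with persist = 64 and random() uniform in 0..255 this is 0..3
   slots, i.e. the channel is seized in a given slot with a probability of
   roughly persist / 256 = 25%. */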
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) static inline unsigned char random(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) /* See "Numerical Recipes in C", second edition, p. 284 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) rand = rand * 1664525L + 1013904223L;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) return (unsigned char) (rand >> 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
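/* Service all pending interrupts of one Z8530. RR3 of channel A holds the
   interrupt pending bits of both channels; each source is dispatched to
   rx_isr()/tx_isr()/es_isr() and then acknowledged with "reset highest IUS".
   The loop is bounded so that a stuck interrupt source cannot hang the
   machine. */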
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) static inline void z8530_isr(struct scc_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) int is, i = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) while ((is = read_scc(&info->priv[0], R3)) && i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (is & CHARxIP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) rx_isr(&info->priv[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) } else if (is & CHATxIP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) tx_isr(&info->priv[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) } else if (is & CHAEXT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) es_isr(&info->priv[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) } else if (is & CHBRxIP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) rx_isr(&info->priv[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) } else if (is & CHBTxIP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) tx_isr(&info->priv[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) es_isr(&info->priv[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) write_scc(&info->priv[0], R0, RES_H_IUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) if (i < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) is);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) /* Ok, no interrupts pending from this 8530. The INT line should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) be inactive now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
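/* Board interrupt handler, shared by both channels. On the PackeTwin the
   cause is read from TWIN_INT_REG (active low) and either the SCC or one of
   the two on-board timers is serviced; the other card types interrupt
   through the SCC only. */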
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) static irqreturn_t scc_isr(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) struct scc_info *info = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) spin_lock(info->priv[0].register_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) /* At this point interrupts are enabled, and the interrupt under service
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) is already acknowledged, but masked off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) Interrupt processing: We loop until we know that the IRQ line is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) low. If another positive edge occurs afterwards during the ISR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) another interrupt will be triggered by the interrupt controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) as soon as the IRQ level is enabled again (see asm/irq.h).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) Bottom-half handlers will be processed after scc_isr(). This is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) important, since we only have small ringbuffers and want new data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) to be fetched/delivered immediately. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (info->priv[0].type == TYPE_TWIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) int is, card_base = info->priv[0].card_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) while ((is = ~inb(card_base + TWIN_INT_REG)) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) TWIN_INT_MSK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if (is & TWIN_SCC_MSK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) z8530_isr(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) } else if (is & TWIN_TMR1_MSK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) inb(card_base + TWIN_CLR_TMR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) tm_isr(&info->priv[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) inb(card_base + TWIN_CLR_TMR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) tm_isr(&info->priv[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) z8530_isr(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) spin_unlock(info->priv[0].register_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
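/* Receive interrupt. In DMA mode only special conditions (end of frame,
   overrun) are handled here; otherwise every available character is read
   from the FIFO into the current ring buffer, with overflow recorded in
   rx_over. */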
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) static void rx_isr(struct scc_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) if (priv->param.dma >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) /* Check special condition and perform error reset. See 2.4.7.5. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) special_condition(priv, read_scc(priv, R1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) write_scc(priv, R0, ERR_RES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) /* Check special condition for each character. Error reset not necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) while (read_scc(priv, R0) & Rx_CH_AV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) rc = read_scc(priv, R1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if (priv->rx_ptr < BUF_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) priv->rx_buf[priv->rx_head][priv->rx_ptr++] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) read_scc_data(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) priv->rx_over = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) read_scc_data(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) special_condition(priv, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
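/* Handle the RR1 status of a received character or DMA transfer: receiver
   overruns are noted in rx_over; at end of frame the byte count (minus the
   two CRC bytes) is computed, statistics are updated, frames of legal length
   with good CRC are queued for rx_bh(), and the DMA address or byte pointer
   is reset for the next frame. */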
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) static void special_condition(struct scc_priv *priv, int rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) int cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) /* See Figure 2-15. Only overrun and EOF need to be checked. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (rc & Rx_OVR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) /* Receiver overrun */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) priv->rx_over = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (priv->param.dma < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) write_scc(priv, R0, ERR_RES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) } else if (rc & END_FR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) /* End of frame. Get byte count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (priv->param.dma >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) flags = claim_dma_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) cb = BUF_SIZE - get_dma_residue(priv->param.dma) - 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) release_dma_lock(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) cb = priv->rx_ptr - 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (priv->rx_over) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) /* We had an overrun */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) priv->dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (priv->rx_over == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) priv->dev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) priv->dev->stats.rx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) priv->rx_over = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) } else if (rc & CRC_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) /* Count invalid CRC only if packet length >= minimum */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (cb >= 15) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) priv->dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) priv->dev->stats.rx_crc_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if (cb >= 15) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (priv->rx_count < NUM_RX_BUF - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) /* Put good frame in FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) priv->rx_len[priv->rx_head] = cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) priv->rx_head =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) (priv->rx_head + 1) % NUM_RX_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) priv->rx_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) schedule_work(&priv->rx_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) priv->dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) priv->dev->stats.rx_over_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) /* Get ready for new frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (priv->param.dma >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) flags = claim_dma_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) set_dma_addr(priv->param.dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) (int) priv->rx_buf[priv->rx_head]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) set_dma_count(priv->param.dma, BUF_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) release_dma_lock(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) priv->rx_ptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
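/* Receive bottom half (workqueue). Drains the RX ring: for each frame an skb
   is allocated, a zero control byte is prepended, and the frame is passed to
   the AX.25 stack via netif_rx(). The ring lock is dropped while copying, so
   interrupts are only blocked briefly. */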
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) static void rx_bh(struct work_struct *ugli_api)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) int i = priv->rx_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) int cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) unsigned char *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) spin_lock_irqsave(&priv->ring_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) while (priv->rx_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) spin_unlock_irqrestore(&priv->ring_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) cb = priv->rx_len[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) /* Allocate buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) skb = dev_alloc_skb(cb + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (skb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) /* Drop packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) priv->dev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) /* Fill buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) data = skb_put(skb, cb + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) data[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) memcpy(&data[1], priv->rx_buf[i], cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) skb->protocol = ax25_type_trans(skb, priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) priv->dev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) priv->dev->stats.rx_bytes += cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) spin_lock_irqsave(&priv->ring_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) /* Move tail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) priv->rx_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) spin_unlock_irqrestore(&priv->ring_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
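/* Programmed-I/O transmit interrupt. Fills the TX FIFO with the current
   frame as long as there is space and data left; once the whole frame has
   been handed to the SCC, the TX interrupt pending latch is reset and frame
   completion is picked up by es_isr(). */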
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) static void tx_isr(struct scc_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) int i = priv->tx_tail, p = priv->tx_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) /* Suspend TX interrupts if we don't want to send anything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) See Figure 2-22. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (p == priv->tx_len[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) write_scc(priv, R0, RES_Tx_P);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) /* Write characters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) write_scc_data(priv, priv->tx_buf[i][p++], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) /* Reset EOM latch of Z8530 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (!priv->tx_ptr && p && priv->chip == Z8530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) write_scc(priv, R0, RES_EOM_L);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) priv->tx_ptr = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
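/* External/status interrupt. The TX underrun/EOM condition marks the end of
   a transmission: on success the frame is removed from the TX ring and the
   queue is woken, on underrun it stays queued for a retry; the pause or tail
   timer is started either way. DCD transitions drive the carrier-detect part
   of the state machine, and a falling CTS on non-PackeTwin cards signals
   board timer expiry (tm_isr()). */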
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) static void es_isr(struct scc_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) int i, rr0, drr0, res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) /* Read status, reset interrupt bit (open latches) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) rr0 = read_scc(priv, R0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) write_scc(priv, R0, RES_EXT_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) drr0 = priv->rr0 ^ rr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) priv->rr0 = rr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) /* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) it might have already been cleared again by AUTOEOM. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) if (priv->state == TX_DATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) /* Get remaining bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) i = priv->tx_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) if (priv->param.dma >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) disable_dma(priv->param.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) flags = claim_dma_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) res = get_dma_residue(priv->param.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) release_dma_lock(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) res = priv->tx_len[i] - priv->tx_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) priv->tx_ptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) /* Disable DREQ / TX interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) outb(0, priv->card_base + TWIN_DMA_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) /* Update packet statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) priv->dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) priv->dev->stats.tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) /* Other underrun interrupts may already be waiting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) write_scc(priv, R0, RES_EXT_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) write_scc(priv, R0, RES_EXT_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) /* Update packet statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) priv->dev->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) priv->dev->stats.tx_bytes += priv->tx_len[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) /* Remove frame from FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) priv->tx_tail = (i + 1) % NUM_TX_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) priv->tx_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) /* Inform upper layers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) netif_wake_queue(priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) /* Switch state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) write_scc(priv, R15, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (priv->tx_count &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) (jiffies - priv->tx_start) < priv->param.txtimeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) priv->state = TX_PAUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) start_timer(priv, priv->param.txpause, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) priv->state = TX_TAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) start_timer(priv, priv->param.txtail, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) /* DCD transition */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) if (drr0 & DCD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) if (rr0 & DCD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) switch (priv->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) case IDLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) case WAIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) priv->state = DCD_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) write_scc(priv, R15, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) start_timer(priv, priv->param.dcdon, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) switch (priv->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) case RX_ON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) rx_off(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) priv->state = DCD_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) write_scc(priv, R15, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) start_timer(priv, priv->param.dcdoff, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) /* CTS transition */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) tm_isr(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
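/* Timer expiry: advance the half-duplex state machine.
   TX_HEAD/TX_PAUSE -> start sending, TX_TAIL -> drop RTS,
   RTS_OFF -> back to receive or to WAIT, WAIT -> start the next transmission
   or go idle, DCD_ON/DCD_OFF -> end of the carrier debounce interval
   (receive, or p-persistence wait). */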
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) static void tm_isr(struct scc_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) switch (priv->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) case TX_HEAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) case TX_PAUSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) tx_on(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) priv->state = TX_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) case TX_TAIL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) write_scc(priv, R5, TxCRC_ENAB | Tx8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) priv->state = RTS_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) if (priv->type != TYPE_TWIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) write_scc(priv, R15, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) start_timer(priv, priv->param.rtsoff, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) case RTS_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) write_scc(priv, R15, DCDIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) priv->rr0 = read_scc(priv, R0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (priv->rr0 & DCD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) priv->dev->stats.collisions++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) rx_on(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) priv->state = RX_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) priv->state = WAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) start_timer(priv, priv->param.waittime, DCDIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) case WAIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) if (priv->tx_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) priv->state = TX_HEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) priv->tx_start = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) write_scc(priv, R5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) TxCRC_ENAB | RTS | TxENAB | Tx8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) write_scc(priv, R15, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) start_timer(priv, priv->param.txdelay, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) priv->state = IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) if (priv->type != TYPE_TWIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) write_scc(priv, R15, DCDIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) case DCD_ON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) case DCD_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) write_scc(priv, R15, DCDIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) priv->rr0 = read_scc(priv, R0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (priv->rr0 & DCD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) rx_on(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) priv->state = RX_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) priv->state = WAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) start_timer(priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) random() / priv->param.persist *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) priv->param.slottime, DCDIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) }