^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /******************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) iphase.c: Device driver for Interphase ATM PCI adapter cards
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) Author: Peter Wang <pwang@iphase.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) Interphase Corporation <www.iphase.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) Version: 1.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *******************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) This software may be used and distributed according to the terms
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) of the GNU General Public License (GPL), incorporated herein by reference.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) Drivers based on this skeleton fall under the GPL and must retain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) the authorship (implicit copyright) notice.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) This program is distributed in the hope that it will be useful, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) WITHOUT ANY WARRANTY; without even the implied warranty of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) General Public License for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) Modified from an incomplete driver for Interphase 5575 1KVC 1M card which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) was originally written by Monalisa Agrawal at UNH. Now this driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) supports a variety of varients of Interphase ATM PCI (i)Chip adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) in terms of PHY type, the size of control memory and the size of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) packet memory. The following are the change log and history:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) Bugfix the Mona's UBR driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) Modify the basic memory allocation and dma logic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) Port the driver to the latest kernel from 2.0.46.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) Complete the ABR logic of the driver, and added the ABR work-
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) around for the hardware anormalies.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) Add the CBR support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) Add the flow control logic to the driver to allow rate-limit VC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) Add 4K VC support to the board with 512K control memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) Add the support of all the variants of the Interphase ATM PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) (25M UTP25) and x531 (DS3 and E3).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) Add SMP support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) Support and updates available at: ftp://ftp.iphase.com/pub/atm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) *******************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #include <linux/atm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #include <linux/atmdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #include <linux/sonet.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #include <linux/time.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #include <linux/uio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #include <linux/wait.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #include <linux/atomic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #include <asm/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #include <asm/byteorder.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #include <linux/jiffies.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #include <linux/nospec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #include "iphase.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #include "suni.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) #define PRIV(dev) ((struct suni_priv *) dev->phy_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) static void desc_dbg(IADEV *iadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) static IADEV *ia_dev[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) static struct atm_dev *_ia_dev[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) static int iadev_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) static void ia_led_timer(struct timer_list *unused);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) static DEFINE_TIMER(ia_timer, ia_led_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) module_param(IA_TX_BUF, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) module_param(IA_TX_BUF_SZ, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) module_param(IA_RX_BUF, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) module_param(IA_RX_BUF_SZ, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) module_param(IADebugFlag, uint, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) /**************************** IA_LIB **********************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) static void ia_init_rtn_q (IARTN_Q *que)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) que->next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) que->tail = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) data->next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) if (que->next == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) que->next = que->tail = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) data->next = que->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) que->next = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) if (!entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) entry->data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) entry->next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) if (que->next == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) que->next = que->tail = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) que->tail->next = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) que->tail = que->tail->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) IARTN_Q *tmpdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) if (que->next == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) tmpdata = que->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) if ( que->next == que->tail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) que->next = que->tail = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) que->next = que->next->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) return tmpdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) static void ia_hack_tcq(IADEV *dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) u_short desc1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) u_short tcq_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) struct ia_vcc *iavcc_r = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) while (dev->host_tcq_wr != tcq_wr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) if (!desc1) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) else if (!dev->desc_tbl[desc1 -1].timestamp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) else if (dev->desc_tbl[desc1 -1].timestamp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) printk("IA: Fatal err in get_desc\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) iavcc_r->vc_desc_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) dev->desc_tbl[desc1 -1].timestamp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) dev->desc_tbl[desc1 -1].txskb, desc1);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) if (iavcc_r->pcr < dev->rate_limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) printk("ia_hack_tcq: No memory available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) dev->desc_tbl[desc1 -1].iavcc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) dev->desc_tbl[desc1 -1].txskb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) dev->host_tcq_wr += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) if (dev->host_tcq_wr > dev->ffL.tcq_ed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) dev->host_tcq_wr = dev->ffL.tcq_st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) } /* ia_hack_tcq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) u_short desc_num, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) struct ia_vcc *iavcc_r = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) unsigned long delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) static unsigned long timer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) int ltimeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) ia_hack_tcq (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) timer = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) i=0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) while (i < dev->num_tx_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) if (!dev->desc_tbl[i].timestamp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) delta = jiffies - dev->desc_tbl[i].timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) if (delta >= ltimeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) IF_ABR(printk("RECOVER run!! desc_tbl %d = %d delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) dev->ffL.tcq_rd = dev->ffL.tcq_ed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) dev->ffL.tcq_rd -= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) if (!(skb = dev->desc_tbl[i].txskb) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) !(iavcc_r = dev->desc_tbl[i].iavcc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) printk("Fatal err, desc table vcc or skb is NULL\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) iavcc_r->vc_desc_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) dev->desc_tbl[i].timestamp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) dev->desc_tbl[i].iavcc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) dev->desc_tbl[i].txskb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) } /* while */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) if (dev->ffL.tcq_rd == dev->host_tcq_wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) return 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) /* Get the next available descriptor number from TCQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) dev->ffL.tcq_rd += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) dev->ffL.tcq_rd = dev->ffL.tcq_st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) if (dev->ffL.tcq_rd == dev->host_tcq_wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) return 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) /* get system time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) dev->desc_tbl[desc_num -1].timestamp = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) return desc_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) u_char foundLockUp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) vcstatus_t *vcstatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) u_short *shd_tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) u_short tempCellSlot, tempFract;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) u_int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) if (vcc->qos.txtp.traffic_class == ATM_ABR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) vcstatus->cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) foundLockUp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) if( vcstatus->cnt == 0x05 ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) abr_vc += vcc->vci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) eabr_vc += vcc->vci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) if( eabr_vc->last_desc ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) /* Wait for 10 Micro sec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) foundLockUp = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) tempCellSlot = abr_vc->last_cell_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) tempFract = abr_vc->fraction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) && (tempFract == dev->testTable[vcc->vci]->fract))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) foundLockUp = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) dev->testTable[vcc->vci]->lastTime = tempCellSlot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) dev->testTable[vcc->vci]->fract = tempFract;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) } /* last descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) vcstatus->cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) } /* vcstatus->cnt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) if (foundLockUp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) IF_ABR(printk("LOCK UP found\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) writew(0xFFFD, dev->seg_reg+MODE_REG_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) /* Wait for 10 Micro sec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) abr_vc->status &= 0xFFF8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) abr_vc->status |= 0x0001; /* state is idle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) if (i < dev->num_vc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) shd_tbl[i] = vcc->vci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) vcstatus->cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) } /* foundLockUp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) } /* if an ABR VC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) **
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) ** +----+----+------------------+-------------------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) ** | R | NZ | 5-bit exponent | 9-bit mantissa |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) ** +----+----+------------------+-------------------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) **
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) ** R = reserved (written as 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) ** NZ = 0 if 0 cells/sec; 1 otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) **
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) ** if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) static u16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) cellrate_to_float(u32 cr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) #define NZ 0x4000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) #define M_BITS 9 /* Number of bits in mantissa */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) #define E_BITS 5 /* Number of bits in exponent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) #define M_MASK 0x1ff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) #define E_MASK 0x1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) u16 flot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) u32 tmp = cr & 0x00ffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) if (cr == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) while (tmp != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) tmp >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) if (i == M_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) flot = NZ | (i << M_BITS) | (cr & M_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) else if (i < M_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) return flot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) ** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) static u32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) float_to_cellrate(u16 rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) u32 exp, mantissa, cps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) if ((rate & NZ) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) exp = (rate >> M_BITS) & E_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) mantissa = rate & M_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) if (exp == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) cps = (1 << M_BITS) | mantissa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) if (exp == M_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) cps = cps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) else if (exp > M_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) cps <<= (exp - M_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) cps >>= (M_BITS - exp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) return cps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) srv_p->class_type = ATM_ABR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) srv_p->pcr = dev->LineRate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) srv_p->mcr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) srv_p->icr = 0x055cb7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) srv_p->tbe = 0xffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) srv_p->frtt = 0x3a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) srv_p->rif = 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) srv_p->rdf = 0xb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) srv_p->nrm = 0x4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) srv_p->trm = 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) srv_p->cdf = 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) srv_p->adtf = 50;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) struct atm_vcc *vcc, u8 flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) f_vc_abr_entry *f_abr_vc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) r_vc_abr_entry *r_abr_vc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) u32 icr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) u8 trm, nrm, crm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) u16 adtf, air, *ptr16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) f_abr_vc += vcc->vci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) switch (flag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) case 1: /* FFRED initialization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) #if 0 /* sanity check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) if (srv_p->pcr == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) return INVALID_PCR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) if (srv_p->pcr > dev->LineRate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) srv_p->pcr = dev->LineRate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) return MCR_UNAVAILABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) if (srv_p->mcr > srv_p->pcr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) return INVALID_MCR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) if (!(srv_p->icr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) srv_p->icr = srv_p->pcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) return INVALID_ICR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) return INVALID_TBE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) return INVALID_FRTT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) if (srv_p->nrm > MAX_NRM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) return INVALID_NRM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) if (srv_p->trm > MAX_TRM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) return INVALID_TRM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) if (srv_p->adtf > MAX_ADTF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) return INVALID_ADTF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) else if (srv_p->adtf == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) srv_p->adtf = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) if (srv_p->cdf > MAX_CDF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) return INVALID_CDF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) if (srv_p->rif > MAX_RIF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) return INVALID_RIF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) if (srv_p->rdf > MAX_RDF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) return INVALID_RDF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) f_abr_vc->f_vc_type = ABR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) nrm = 2 << srv_p->nrm; /* (2 ** (srv_p->nrm +1)) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) /* i.e 2**n = 2 << (n-1) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) f_abr_vc->f_nrm = nrm << 8 | nrm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) trm = 100000/(2 << (16 - srv_p->trm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) if ( trm == 0) trm = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) crm = srv_p->tbe / nrm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) if (crm == 0) crm = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) f_abr_vc->f_crm = crm & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) ((srv_p->tbe/srv_p->frtt)*1000000) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) (1000000/(srv_p->frtt/srv_p->tbe)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) f_abr_vc->f_icr = cellrate_to_float(icr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) adtf = (10000 * srv_p->adtf)/8192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) if (adtf == 0) adtf = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) f_abr_vc->f_acr = f_abr_vc->f_icr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) f_abr_vc->f_status = 0x0042;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) case 0: /* RFRED initialization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) r_abr_vc += vcc->vci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) air = srv_p->pcr << (15 - srv_p->rif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) if (air == 0) air = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) r_abr_vc->r_air = cellrate_to_float(air);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) dev->sum_mcr += srv_p->mcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) dev->n_abr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) u32 rateLow=0, rateHigh, rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) int entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) struct ia_vcc *ia_vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) int idealSlot =0, testSlot, toBeAssigned, inc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) u32 spacing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) u16 *SchedTbl, *TstSchedTbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) u16 cbrVC, vcIndex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) u32 fracSlot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) u32 sp_mod = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) u32 sp_mod2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) /* IpAdjustTrafficParams */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) if (vcc->qos.txtp.max_pcr <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) IF_ERR(printk("PCR for CBR not defined\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) rate = vcc->qos.txtp.max_pcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) entries = rate / dev->Granularity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) entries, rate, dev->Granularity);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) if (entries < 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) rateLow = entries * dev->Granularity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) rateHigh = (entries + 1) * dev->Granularity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) if (3*(rate - rateLow) > (rateHigh - rate))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) entries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) if (entries > dev->CbrRemEntries) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) entries, dev->CbrRemEntries);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) ia_vcc = INPH_IA_VCC(vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) ia_vcc->NumCbrEntry = entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) dev->sum_mcr += entries * dev->Granularity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) /* IaFFrednInsertCbrSched */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) // Starting at an arbitrary location, place the entries into the table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) // as smoothly as possible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) cbrVC = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) spacing = dev->CbrTotEntries / entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) sp_mod = dev->CbrTotEntries % entries; // get modulo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) toBeAssigned = entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) fracSlot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) vcIndex = vcc->vci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) while (toBeAssigned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) // If this is the first time, start the table loading for this connection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) // as close to entryPoint as possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) if (toBeAssigned == entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) idealSlot = dev->CbrEntryPt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) dev->CbrEntryPt += 2; // Adding 2 helps to prevent clumping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) if (dev->CbrEntryPt >= dev->CbrTotEntries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) idealSlot += (u32)(spacing + fracSlot); // Point to the next location
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) // in the table that would be smoothest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) fracSlot = ((sp_mod + sp_mod2) / entries); // get new integer part
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) sp_mod2 = ((sp_mod + sp_mod2) % entries); // calc new fractional part
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) if (idealSlot >= (int)dev->CbrTotEntries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) idealSlot -= dev->CbrTotEntries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) // Continuously check around this ideal value until a null
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) // location is encountered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) inc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) testSlot = idealSlot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) TstSchedTbl = (u16*)(SchedTbl+testSlot); //set index and read in value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) testSlot, TstSchedTbl,toBeAssigned);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) while (cbrVC) // If another VC at this location, we have to keep looking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) inc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) testSlot = idealSlot - inc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) if (testSlot < 0) { // Wrap if necessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) testSlot += dev->CbrTotEntries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) SchedTbl,testSlot);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) TstSchedTbl = (u16 *)(SchedTbl + testSlot); // set table index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) if (!cbrVC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) testSlot = idealSlot + inc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) testSlot -= dev->CbrTotEntries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) testSlot, toBeAssigned);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) // set table index and read in value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) TstSchedTbl = (u16*)(SchedTbl + testSlot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) TstSchedTbl,cbrVC,inc);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) } /* while */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) // Move this VCI number into this location of the CBR Sched table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) dev->CbrRemEntries--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) toBeAssigned--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) } /* while */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) /* IaFFrednCbrEnable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) dev->NumEnabledCBR++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) if (dev->NumEnabledCBR == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) IF_CBR(printk("CBR is enabled\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) static void ia_cbrVc_close (struct atm_vcc *vcc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) IADEV *iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) u16 *SchedTbl, NullVci = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) u32 i, NumFound;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) iadev = INPH_IA_DEV(vcc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) iadev->NumEnabledCBR--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) if (iadev->NumEnabledCBR == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) IF_CBR (printk("CBR support disabled\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) NumFound = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) for (i=0; i < iadev->CbrTotEntries; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) if (*SchedTbl == vcc->vci) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) iadev->CbrRemEntries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) *SchedTbl = NullVci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) IF_CBR(NumFound++;)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) SchedTbl++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) static int ia_avail_descs(IADEV *iadev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) int tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) ia_hack_tcq(iadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) iadev->ffL.tcq_st) / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) return tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) static int ia_que_tx (IADEV *iadev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) int num_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) struct atm_vcc *vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) num_desc = ia_avail_descs(iadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) if (!(vcc = ATM_SKB(skb)->vcc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) printk("ia_que_tx: Null vcc\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) if (!test_bit(ATM_VF_READY,&vcc->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) printk("Free the SKB on closed vci %d \n", vcc->vci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) if (ia_pkt_tx (vcc, skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) skb_queue_head(&iadev->tx_backlog, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) num_desc--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) static void ia_tx_poll (IADEV *iadev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) struct atm_vcc *vcc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) struct sk_buff *skb = NULL, *skb1 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) struct ia_vcc *iavcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) IARTN_Q * rtne;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) ia_hack_tcq(iadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) skb = rtne->data.txskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) printk("ia_tx_poll: skb is null\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) vcc = ATM_SKB(skb)->vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) if (!vcc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) printk("ia_tx_poll: vcc is null\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) iavcc = INPH_IA_VCC(vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) if (!iavcc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) printk("ia_tx_poll: iavcc is null\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) skb1 = skb_dequeue(&iavcc->txing_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) while (skb1 && (skb1 != skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) IF_ERR(printk("Release the SKB not match\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) if ((vcc->pop) && (skb1->len != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) vcc->pop(vcc, skb1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) (long)skb1);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) dev_kfree_skb_any(skb1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) skb1 = skb_dequeue(&iavcc->txing_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) if (!skb1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) IF_EVENT(printk("IA: Vci %d - skb not found requed\n",vcc->vci);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) if ((vcc->pop) && (skb->len != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) vcc->pop(vcc, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) kfree(rtne);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) ia_que_tx(iadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) u32 t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) * Issue a command to enable writes to the NOVRAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) NVRAM_CMD (EXTEND + EWEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) NVRAM_CLR_CE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) * issue the write command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) NVRAM_CMD(IAWRITE + addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) * Send the data, starting with D15, then D14, and so on for 16 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) for (i=15; i>=0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) NVRAM_CLKOUT (val & 0x8000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) val <<= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) NVRAM_CLR_CE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) CFG_OR(NVCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) while (!(t & NVDO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) NVRAM_CLR_CE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) * disable writes again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) NVRAM_CMD(EXTEND + EWDS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) NVRAM_CLR_CE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) CFG_AND(~NVDI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) u_short val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) u32 t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) * Read the first bit that was clocked with the falling edge of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * the last command data clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) NVRAM_CMD(IAREAD + addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) * Now read the rest of the bits, the next bit read is D14, then D13,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) * and so on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) for (i=15; i>=0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) NVRAM_CLKIN(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) val |= (t << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) NVRAM_CLR_CE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) CFG_AND(~NVDI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) static void ia_hw_type(IADEV *iadev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) u_short memType = ia_eeprom_get(iadev, 25);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) iadev->memType = memType;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) iadev->num_tx_desc = IA_TX_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) iadev->tx_buf_sz = IA_TX_BUF_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) iadev->num_rx_desc = IA_RX_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) iadev->rx_buf_sz = IA_RX_BUF_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (IA_TX_BUF == DFL_TX_BUFFERS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) iadev->num_tx_desc = IA_TX_BUF / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) iadev->num_tx_desc = IA_TX_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) iadev->tx_buf_sz = IA_TX_BUF_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) if (IA_RX_BUF == DFL_RX_BUFFERS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) iadev->num_rx_desc = IA_RX_BUF / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) iadev->num_rx_desc = IA_RX_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) iadev->rx_buf_sz = IA_RX_BUF_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) if (IA_TX_BUF == DFL_TX_BUFFERS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) iadev->num_tx_desc = IA_TX_BUF / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) iadev->num_tx_desc = IA_TX_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) iadev->tx_buf_sz = IA_TX_BUF_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (IA_RX_BUF == DFL_RX_BUFFERS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) iadev->num_rx_desc = IA_RX_BUF / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) iadev->num_rx_desc = IA_RX_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) iadev->rx_buf_sz = IA_RX_BUF_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) iadev->rx_buf_sz, iadev->rx_pkt_ram);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if ((memType & FE_MASK) == FE_SINGLE_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) iadev->phy_type = PHY_OC3C_S;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) else if ((memType & FE_MASK) == FE_UTP_OPTION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) iadev->phy_type = PHY_UTP155;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) iadev->phy_type = PHY_OC3C_M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) iadev->phy_type = memType & FE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) memType,iadev->phy_type);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) if (iadev->phy_type == FE_25MBIT_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) else if (iadev->phy_type == FE_DS3_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) else if (iadev->phy_type == FE_E3_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) iadev->LineRate = (u32)(ATM_OC3_PCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) static u32 ia_phy_read32(struct iadev_priv *ia, unsigned int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) return readl(ia->phy + (reg >> 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) static void ia_phy_write32(struct iadev_priv *ia, unsigned int reg, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) writel(val, ia->phy + (reg >> 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) static void ia_frontend_intr(struct iadev_priv *iadev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (iadev->phy_type & FE_25MBIT_PHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) status = ia_phy_read32(iadev, MB25_INTR_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) iadev->carrier_detect = (status & MB25_IS_GSB) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) } else if (iadev->phy_type & FE_DS3_PHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) ia_phy_read32(iadev, SUNI_DS3_FRM_INTR_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) } else if (iadev->phy_type & FE_E3_PHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) ia_phy_read32(iadev, SUNI_E3_FRM_MAINT_INTR_IND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) status = ia_phy_read32(iadev, SUNI_RSOP_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) iadev->carrier_detect = (status & SUNI_LOSV) ? 0 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) printk(KERN_INFO "IA: SUNI carrier %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) iadev->carrier_detect ? "detected" : "lost signal");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) static void ia_mb25_init(struct iadev_priv *iadev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) ia_phy_write32(iadev, MB25_MASTER_CTRL, MB25_MC_DRIC | MB25_MC_DREC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) ia_phy_write32(iadev, MB25_DIAG_CONTROL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) iadev->carrier_detect =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) (ia_phy_read32(iadev, MB25_INTR_STATUS) & MB25_IS_GSB) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) struct ia_reg {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) u16 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) u16 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) static void ia_phy_write(struct iadev_priv *iadev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) const struct ia_reg *regs, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) while (len--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) ia_phy_write32(iadev, regs->reg, regs->val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) regs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) static const struct ia_reg suni_ds3_init[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) { SUNI_DS3_FRM_INTR_ENBL, 0x17 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) { SUNI_DS3_FRM_CFG, 0x01 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) { SUNI_DS3_TRAN_CFG, 0x01 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) { SUNI_CONFIG, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) { SUNI_SPLR_CFG, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) { SUNI_SPLT_CFG, 0 }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) ia_phy_write(iadev, suni_ds3_init, ARRAY_SIZE(suni_ds3_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) static const struct ia_reg suni_e3_init[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) { SUNI_E3_FRM_FRAM_OPTIONS, 0x04 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) { SUNI_E3_FRM_MAINT_OPTIONS, 0x20 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) { SUNI_E3_FRM_FRAM_INTR_ENBL, 0x1d },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) { SUNI_E3_FRM_MAINT_INTR_ENBL, 0x30 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) { SUNI_E3_TRAN_STAT_DIAG_OPTIONS, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) { SUNI_E3_TRAN_FRAM_OPTIONS, 0x01 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) { SUNI_CONFIG, SUNI_PM7345_E3ENBL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) { SUNI_SPLR_CFG, 0x41 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) { SUNI_SPLT_CFG, 0x41 }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) ia_phy_write(iadev, suni_e3_init, ARRAY_SIZE(suni_e3_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) static void ia_suni_pm7345_init(struct iadev_priv *iadev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) static const struct ia_reg suni_init[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) /* Enable RSOP loss of signal interrupt. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) { SUNI_INTR_ENBL, 0x28 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) /* Clear error counters. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) { SUNI_ID_RESET, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) /* Clear "PMCTST" in master test register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) { SUNI_MASTER_TEST, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) { SUNI_RXCP_CTRL, 0x2c },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) { SUNI_RXCP_FCTRL, 0x81 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) { SUNI_RXCP_IDLE_PAT_H1, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) { SUNI_RXCP_IDLE_PAT_H2, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) { SUNI_RXCP_IDLE_PAT_H3, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) { SUNI_RXCP_IDLE_PAT_H4, 0x01 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) { SUNI_RXCP_IDLE_MASK_H1, 0xff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) { SUNI_RXCP_IDLE_MASK_H2, 0xff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) { SUNI_RXCP_IDLE_MASK_H3, 0xff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) { SUNI_RXCP_IDLE_MASK_H4, 0xfe },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) { SUNI_RXCP_CELL_PAT_H1, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) { SUNI_RXCP_CELL_PAT_H2, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) { SUNI_RXCP_CELL_PAT_H3, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) { SUNI_RXCP_CELL_PAT_H4, 0x01 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) { SUNI_RXCP_CELL_MASK_H1, 0xff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) { SUNI_RXCP_CELL_MASK_H2, 0xff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) { SUNI_RXCP_CELL_MASK_H3, 0xff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) { SUNI_RXCP_CELL_MASK_H4, 0xff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) { SUNI_TXCP_CTRL, 0xa4 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) { SUNI_TXCP_INTR_EN_STS, 0x10 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) { SUNI_TXCP_IDLE_PAT_H5, 0x55 }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (iadev->phy_type & FE_DS3_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) ia_suni_pm7345_init_ds3(iadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) ia_suni_pm7345_init_e3(iadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) ia_phy_write(iadev, suni_init, ARRAY_SIZE(suni_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) ia_phy_write32(iadev, SUNI_CONFIG, ia_phy_read32(iadev, SUNI_CONFIG) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) ~(SUNI_PM7345_LLB | SUNI_PM7345_CLB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) SUNI_PM7345_DLB | SUNI_PM7345_PLB));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) #ifdef __SNMP__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) #endif /* __SNMP__ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) /***************************** IA_LIB END *****************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) #ifdef CONFIG_ATM_IA_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) static int tcnter = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) static void xdump( u_char* cp, int length, char* prefix )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) int col, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) u_char prntBuf[120];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) u_char* pBuf = prntBuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) while(count < length){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) pBuf += sprintf( pBuf, "%s", prefix );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) for(col = 0;count + col < length && col < 16; col++){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (col != 0 && (col % 4) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) pBuf += sprintf( pBuf, " " );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) while(col++ < 16){ /* pad end of buffer with blanks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if ((col % 4) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) sprintf( pBuf, " " );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) pBuf += sprintf( pBuf, " " );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) pBuf += sprintf( pBuf, " " );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) for(col = 0;count + col < length && col < 16; col++){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (isprint((int)cp[count + col]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) pBuf += sprintf( pBuf, "%c", cp[count + col] );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) pBuf += sprintf( pBuf, "." );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) printk("%s\n", prntBuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) count += col;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) pBuf = prntBuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) } /* close xdump(... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) #endif /* CONFIG_ATM_IA_DEBUG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) static struct atm_dev *ia_boards = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) #define ACTUAL_RAM_BASE \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) RAM_BASE*((iadev->mem)/(128 * 1024))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) #define ACTUAL_SEG_RAM_BASE \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) #define ACTUAL_REASS_RAM_BASE \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /*-- some utilities and memory allocation stuff will come here -------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) static void desc_dbg(IADEV *iadev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) void __iomem *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) // regval = readl((u32)ia_cmds->maddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) tcq_wr_ptr = readw(iadev->seg_reg+TCQ_WR_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) readw(iadev->seg_ram+tcq_wr_ptr-2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) printk(" host_tcq_wr = 0x%x host_tcq_rd = 0x%x \n", iadev->host_tcq_wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) iadev->ffL.tcq_rd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) tcq_st_ptr = readw(iadev->seg_reg+TCQ_ST_ADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) tcq_ed_ptr = readw(iadev->seg_reg+TCQ_ED_ADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) printk("tcq_st_ptr = 0x%x tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) while (tcq_st_ptr != tcq_ed_ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) tmp = iadev->seg_ram+tcq_st_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) printk("TCQ slot %d desc = %d Addr = %p\n", i++, readw(tmp), tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) tcq_st_ptr += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) for(i=0; i <iadev->num_tx_desc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /*----------------------------- Receiving side stuff --------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static void rx_excp_rcvd(struct atm_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) #if 0 /* closing the receiving size will cause too many excp int */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) IADEV *iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) u_short state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) u_short excpq_rd_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) //u_short *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) int vci, error = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) iadev = INPH_IA_DEV(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) { printk("state = %x \n", state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) // TODO: update exception stat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) vci = readw(iadev->reass_ram+excpq_rd_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) // pwang_test
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) excpq_rd_ptr += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) static void free_desc(struct atm_dev *dev, int desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) IADEV *iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) iadev = INPH_IA_DEV(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) iadev->rfL.fdq_wr +=2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) iadev->rfL.fdq_wr = iadev->rfL.fdq_st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) static int rx_pkt(struct atm_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) IADEV *iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) struct atm_vcc *vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) unsigned short status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) struct rx_buf_desc __iomem *buf_desc_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) int desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) struct dle* wr_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) u_int buf_addr, dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) iadev = INPH_IA_DEV(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /* mask 1st 3 bits to get the actual descno. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) iadev->reass_ram, iadev->rfL.pcq_rd, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) printk(" pcq_wr_ptr = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) /* update the read pointer - maybe we shud do this in the end*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) iadev->rfL.pcq_rd += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) /* get the buffer desc entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) update stuff. - doesn't seem to be any update necessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) /* make the ptr point to the corresponding buffer desc entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) buf_desc_ptr += desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (!desc || (desc > iadev->num_rx_desc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) free_desc(dev, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) if (!vcc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) free_desc(dev, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) printk("IA: null vcc, drop PDU\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) /* might want to check the status bits for errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) status = (u_short) (buf_desc_ptr->desc_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (status & (RX_CER | RX_PTE | RX_OFL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) atomic_inc(&vcc->stats->rx_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) IF_ERR(printk("IA: bad packet, dropping it");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) if (status & RX_CER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) IF_ERR(printk(" cause: packet CRC error\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) else if (status & RX_PTE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) IF_ERR(printk(" cause: packet time out\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) IF_ERR(printk(" cause: buffer overflow\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) goto out_free_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) build DLE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) len = dma_addr - buf_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if (len > iadev->rx_buf_sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) atomic_inc(&vcc->stats->rx_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) goto out_free_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (vcc->vci < 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) printk("Drop control packets\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) goto out_free_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) skb_put(skb,len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) // pwang_test
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) ATM_SKB(skb)->vcc = vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) ATM_DESC(skb) = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) skb_queue_tail(&iadev->rx_dma_q, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) /* Build the DLE structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) wr_ptr = iadev->rx_dle_q.write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) len, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) wr_ptr->local_pkt_addr = buf_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) wr_ptr->bytes = len; /* We don't know this do we ?? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) wr_ptr->mode = DMA_INT_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) /* shud take care of wrap around here too. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if(++wr_ptr == iadev->rx_dle_q.end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) wr_ptr = iadev->rx_dle_q.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) iadev->rx_dle_q.write = wr_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) /* Increment transaction counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) out: return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) out_free_desc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) free_desc(dev, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) static void rx_intr(struct atm_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) IADEV *iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) u_short status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) u_short state, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) iadev = INPH_IA_DEV(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) if (status & RX_PKT_RCVD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) /* do something */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) /* Basically recvd an interrupt for receiving a packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) A descriptor would have been written to the packet complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) queue. Get all the descriptors and set up dma to move the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) packets till the packet complete queue is empty..
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) while(!(state & PCQ_EMPTY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) rx_pkt(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) iadev->rxing = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (status & RX_FREEQ_EMPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (iadev->rxing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) iadev->rx_tmp_jif = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) iadev->rxing = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) for (i = 1; i <= iadev->num_rx_desc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) free_desc(dev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) printk("Test logic RUN!!!!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) iadev->rxing = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) if (status & RX_EXCP_RCVD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) /* probably need to handle the exception queue also. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) rx_excp_rcvd(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) if (status & RX_RAW_RCVD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) /* need to handle the raw incoming cells. This deepnds on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) whether we have programmed to receive the raw cells or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) Else ignore. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) IF_EVENT(printk("Rx intr status: RX_RAW_RCVD %08x\n", status);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) static void rx_dle_intr(struct atm_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) IADEV *iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) struct atm_vcc *vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) int desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) u_short state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) struct dle *dle, *cur_dle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) u_int dle_lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) iadev = INPH_IA_DEV(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) /* free all the dles done, that is just update our own dle read pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) - do we really need to do this. Think not. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) /* DMA is done, just get all the recevie buffers from the rx dma queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) and push them up to the higher layer protocol. Also free the desc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) associated with the buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) dle = iadev->rx_dle_q.read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) while(dle != cur_dle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) /* free the DMAed skb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) skb = skb_dequeue(&iadev->rx_dma_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) goto INCR_DLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) desc = ATM_DESC(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) free_desc(dev, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (!(len = skb->len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) printk("rx_dle_intr: skb len 0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) struct cpcs_trailer *trailer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) u_short length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) struct ia_vcc *ia_vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) dma_unmap_single(&iadev->pci->dev, iadev->rx_dle_q.write->sys_pkt_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) len, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) /* no VCC related housekeeping done as yet. lets see */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) vcc = ATM_SKB(skb)->vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) if (!vcc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) printk("IA: null vcc\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) goto INCR_DLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) ia_vcc = INPH_IA_VCC(vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (ia_vcc == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) atomic_inc(&vcc->stats->rx_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) atm_return(vcc, skb->truesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) goto INCR_DLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) // get real pkt length pwang_test
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) trailer = (struct cpcs_trailer*)((u_char *)skb->data +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) skb->len - sizeof(*trailer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) length = swap_byte_order(trailer->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if ((length > iadev->rx_buf_sz) || (length >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) (skb->len - sizeof(struct cpcs_trailer))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) atomic_inc(&vcc->stats->rx_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) length, skb->len);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) atm_return(vcc, skb->truesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) goto INCR_DLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) skb_trim(skb, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) /* Display the packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) xdump(skb->data, skb->len, "RX: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) printk("\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) IF_RX(printk("rx_dle_intr: skb push");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) vcc->push(vcc,skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) atomic_inc(&vcc->stats->rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) iadev->rx_pkt_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) INCR_DLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) if (++dle == iadev->rx_dle_q.end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) dle = iadev->rx_dle_q.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) iadev->rx_dle_q.read = dle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) /* if the interrupts are masked because there were no free desc available,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) unmask them now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (!iadev->rxing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if (!(state & FREEQ_EMPTY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) iadev->reass_reg+REASS_MASK_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) iadev->rxing++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) static int open_rx(struct atm_vcc *vcc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) IADEV *iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) u_short __iomem *vc_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) u_short __iomem *reass_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) iadev = INPH_IA_DEV(vcc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) if (iadev->phy_type & FE_25MBIT_PHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) printk("IA: ABR not support\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) /* Make only this VCI in the vc table valid and let all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) others be invalid entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) vc_table += vcc->vci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) /* mask the last 6 bits and OR it with 3 for 1K VCs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) *vc_table = vcc->vci << 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) /* Also keep a list of open rx vcs so that we can attach them with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) incoming PDUs later. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) (vcc->qos.txtp.traffic_class == ATM_ABR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) srv_cls_param_t srv_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) init_abr_vc(iadev, &srv_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) ia_open_abr_vc(iadev, &srv_p, vcc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) else { /* for UBR later may need to add CBR logic */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) reass_ptr += vcc->vci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) *reass_ptr = NO_AAL5_PKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) if (iadev->rx_open[vcc->vci])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) vcc->dev->number, vcc->vci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) iadev->rx_open[vcc->vci] = vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) static int rx_init(struct atm_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) IADEV *iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) struct rx_buf_desc __iomem *buf_desc_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) unsigned long rx_pkt_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) void *dle_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) struct abr_vc_table *abr_vc_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) u16 *vc_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) u16 *reass_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) int i,j, vcsize_sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) u_short freeq_st_adr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) u_short *freeq_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) iadev = INPH_IA_DEV(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) // spin_lock_init(&iadev->rx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) /* Allocate 4k bytes - more aligned than needed (4k boundary) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) &iadev->rx_dle_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) if (!dle_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) iadev->rx_dle_q.start = (struct dle *)dle_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) iadev->rx_dle_q.read = iadev->rx_dle_q.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) iadev->rx_dle_q.write = iadev->rx_dle_q.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) /* the end of the dle q points to the entry after the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) DLE that can be used. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) /* write the upper 20 bits of the start address to rx list address register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) /* We know this is 32bit bus addressed so the following is safe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) writel(iadev->rx_dle_dma & 0xfffff000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) iadev->dma + IPHASE5575_RX_LIST_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) iadev->dma+IPHASE5575_TX_LIST_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) iadev->dma+IPHASE5575_RX_LIST_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) writew(0, iadev->reass_reg+MODE_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) /* Receive side control memory map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) -------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) Buffer descr 0x0000 (736 - 23K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) VP Table 0x5c00 (256 - 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) Except q 0x5e00 (128 - 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) Free buffer q 0x6000 (1K - 2K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) Packet comp q 0x6800 (1K - 2K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) Reass Table 0x7000 (1K - 2K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) VC Table 0x7800 (1K - 2K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) ABR VC Table 0x8000 (1K - 32K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) /* Base address for Buffer Descriptor Table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) /* Set the buffer size register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) /* Initialize each entry in the Buffer Descriptor Table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) buf_desc_ptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) rx_pkt_start = iadev->rx_pkt_ram;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) for(i=1; i<=iadev->num_rx_desc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) buf_desc_ptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) rx_pkt_start += iadev->rx_buf_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) i = FREE_BUF_DESC_Q*iadev->memSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) writew(i >> 16, iadev->reass_reg+REASS_QUEUE_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) writew(i, iadev->reass_reg+FREEQ_ST_ADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) writew(i+iadev->num_rx_desc*sizeof(u_short),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) iadev->reass_reg+FREEQ_ED_ADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) writew(i, iadev->reass_reg+FREEQ_RD_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) writew(i+iadev->num_rx_desc*sizeof(u_short),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) iadev->reass_reg+FREEQ_WR_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) /* Fill the FREEQ with all the free descriptors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) for(i=1; i<=iadev->num_rx_desc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) *freeq_start = (u_short)i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) freeq_start++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) /* Packet Complete Queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) writew(i, iadev->reass_reg+PCQ_ST_ADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) writew(i, iadev->reass_reg+PCQ_RD_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) writew(i, iadev->reass_reg+PCQ_WR_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) /* Exception Queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) iadev->reass_reg+EXCP_Q_ED_ADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) /* Load local copy of FREEQ and PCQ ptrs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) iadev->rfL.pcq_wr);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) /* just for check - no VP TBL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) /* VP Table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) /* initialize VP Table for invalid VPIs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) - I guess we can write all 1s or 0x000f in the entire memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) space or something similar.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) /* This seems to work and looks right to me too !!! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) i = REASS_TABLE * iadev->memSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) /* initialize Reassembly table to I don't know what ???? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) reass_table = (u16 *)(iadev->reass_ram+i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) j = REASS_TABLE_SZ * iadev->memSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) for(i=0; i < j; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) *reass_table++ = NO_AAL5_PKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) i = 8*1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) vcsize_sel = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) while (i != iadev->num_vc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) i /= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) vcsize_sel++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) i = RX_VC_TABLE * iadev->memSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) j = RX_VC_TABLE_SZ * iadev->memSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) for(i = 0; i < j; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) /* shift the reassembly pointer by 3 + lower 3 bits of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) vc_lkup_base register (=3 for 1K VCs) and the last byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) is those low 3 bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) Shall program this later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) *vc_table = (i << 6) | 15; /* for invalid VCI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) vc_table++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) /* ABR VC table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) i = ABR_VC_TABLE * iadev->memSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) i = ABR_VC_TABLE * iadev->memSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) j = REASS_TABLE_SZ * iadev->memSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) for(i = 0; i < j; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) abr_vc_table->rdf = 0x0003;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) abr_vc_table->air = 0x5eb1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) abr_vc_table++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) /* Initialize other registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) /* VP Filter Register set for VC Reassembly only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) writew(0xff00, iadev->reass_reg+VP_FILTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) writew(0x1, iadev->reass_reg+PROTOCOL_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) /* Packet Timeout Count related Registers :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) Set packet timeout to occur in about 3 seconds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) Set Packet Aging Interval count register to overflow in about 4 us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) i = (j >> 6) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) j += 2 * (j - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) i |= ((j << 2) & 0xFF00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) writew(i, iadev->reass_reg+TMOUT_RANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) /* initiate the desc_tble */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) for(i=0; i<iadev->num_tx_desc;i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) iadev->desc_tbl[i].timestamp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) /* to clear the interrupt status register - read it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) readw(iadev->reass_reg+REASS_INTR_STATUS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) /* Mask Register - clear it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) skb_queue_head_init(&iadev->rx_dma_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) iadev->rx_free_desc_qhead = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) if (!iadev->rx_open) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) dev->number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) goto err_free_dle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) iadev->rxing = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) iadev->rx_pkt_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) /* Mode Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) writew(R_ONLINE, iadev->reass_reg+MODE_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) err_free_dle:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) iadev->rx_dle_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) The memory map suggested in appendix A and the coding for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) Keeping it around just in case we change our mind later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) Buffer descr 0x0000 (128 - 4K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) UBR sched 0x1000 (1K - 4K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) UBR Wait q 0x2000 (1K - 4K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) Commn queues 0x3000 Packet Ready, Trasmit comp(0x3100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) (128 - 256) each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) extended VC 0x4000 (1K - 8K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) ABR sched 0x6000 and ABR wait queue (1K - 2K) each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) CBR sched 0x7000 (as needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) VC table 0x8000 (1K - 32K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) static void tx_intr(struct atm_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) IADEV *iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) unsigned short status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) iadev = INPH_IA_DEV(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) if (status & TRANSMIT_DONE){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) IF_EVENT(printk("Transmit Done Intr logic run\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) spin_lock_irqsave(&iadev->tx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) ia_tx_poll(iadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) spin_unlock_irqrestore(&iadev->tx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) if (iadev->close_pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) wake_up(&iadev->close_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (status & TCQ_NOT_EMPTY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) static void tx_dle_intr(struct atm_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) IADEV *iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) struct dle *dle, *cur_dle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) struct atm_vcc *vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) struct ia_vcc *iavcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) u_int dle_lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) iadev = INPH_IA_DEV(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) spin_lock_irqsave(&iadev->tx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) dle = iadev->tx_dle_q.read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) (sizeof(struct dle)*DLE_ENTRIES - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) while (dle != cur_dle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) /* free the DMAed skb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) skb = skb_dequeue(&iadev->tx_dma_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if (!skb) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) dma_unmap_single(&iadev->pci->dev, dle->sys_pkt_addr, skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) vcc = ATM_SKB(skb)->vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) if (!vcc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) printk("tx_dle_intr: vcc is null\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) spin_unlock_irqrestore(&iadev->tx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) iavcc = INPH_IA_VCC(vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) if (!iavcc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) printk("tx_dle_intr: iavcc is null\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) spin_unlock_irqrestore(&iadev->tx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) if ((vcc->pop) && (skb->len != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) vcc->pop(vcc, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) else { /* Hold the rate-limited skb for flow control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) IA_SKB_STATE(skb) |= IA_DLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) skb_queue_tail(&iavcc->txing_skb, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) if (++dle == iadev->tx_dle_q.end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) dle = iadev->tx_dle_q.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) iadev->tx_dle_q.read = dle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) spin_unlock_irqrestore(&iadev->tx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) static int open_tx(struct atm_vcc *vcc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) struct ia_vcc *ia_vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) IADEV *iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) struct main_vc *vc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) struct ext_vc *evc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) iadev = INPH_IA_DEV(vcc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) if (iadev->phy_type & FE_25MBIT_PHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) if (vcc->qos.txtp.traffic_class == ATM_ABR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) printk("IA: ABR not support\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (vcc->qos.txtp.traffic_class == ATM_CBR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) printk("IA: CBR not support\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) ia_vcc = INPH_IA_VCC(vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) if (vcc->qos.txtp.max_sdu >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) printk("IA: SDU size over (%d) the configured SDU size %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) vcc->dev_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) kfree(ia_vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) ia_vcc->vc_desc_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) ia_vcc->txing = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) /* find pcr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) vcc->qos.txtp.pcr = iadev->LineRate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) vcc->qos.txtp.pcr = iadev->LineRate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) if (vcc->qos.txtp.pcr > iadev->LineRate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) vcc->qos.txtp.pcr = iadev->LineRate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) ia_vcc->pcr = vcc->qos.txtp.pcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) else ia_vcc->ltimeout = 2700 * HZ / ia_vcc->pcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) if (ia_vcc->pcr < iadev->rate_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) skb_queue_head_init (&ia_vcc->txing_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) if (ia_vcc->pcr < iadev->rate_limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) struct sock *sk = sk_atm(vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) if (vcc->qos.txtp.max_sdu != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (ia_vcc->pcr > 60000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) else if (ia_vcc->pcr > 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) sk->sk_sndbuf = 24576;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) vc += vcc->vci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) evc += vcc->vci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) memset((caddr_t)vc, 0, sizeof(*vc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) memset((caddr_t)evc, 0, sizeof(*evc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) /* store the most significant 4 bits of vci as the last 4 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) of first part of atm header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) store the last 12 bits of vci as first 12 bits of the second
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) part of the atm header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) /* check the following for different traffic classes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) if (vcc->qos.txtp.traffic_class == ATM_UBR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) vc->type = UBR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) vc->status = CRC_APPEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) vc->acr = cellrate_to_float(iadev->LineRate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) if (vcc->qos.txtp.pcr > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) vcc->qos.txtp.max_pcr,vc->acr);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) else if (vcc->qos.txtp.traffic_class == ATM_ABR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) { srv_cls_param_t srv_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) IF_ABR(printk("Tx ABR VCC\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) init_abr_vc(iadev, &srv_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) if (vcc->qos.txtp.pcr > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) srv_p.pcr = vcc->qos.txtp.pcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) if (vcc->qos.txtp.min_pcr > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) if (tmpsum > iadev->LineRate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) srv_p.mcr = vcc->qos.txtp.min_pcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) iadev->sum_mcr += vcc->qos.txtp.min_pcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) else srv_p.mcr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) if (vcc->qos.txtp.icr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) srv_p.icr = vcc->qos.txtp.icr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) if (vcc->qos.txtp.tbe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) srv_p.tbe = vcc->qos.txtp.tbe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) if (vcc->qos.txtp.frtt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) srv_p.frtt = vcc->qos.txtp.frtt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) if (vcc->qos.txtp.rif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) srv_p.rif = vcc->qos.txtp.rif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) if (vcc->qos.txtp.rdf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) srv_p.rdf = vcc->qos.txtp.rdf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) if (vcc->qos.txtp.nrm_pres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) srv_p.nrm = vcc->qos.txtp.nrm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) if (vcc->qos.txtp.trm_pres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) srv_p.trm = vcc->qos.txtp.trm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) if (vcc->qos.txtp.adtf_pres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) srv_p.adtf = vcc->qos.txtp.adtf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) if (vcc->qos.txtp.cdf_pres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) srv_p.cdf = vcc->qos.txtp.cdf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) if (srv_p.icr > srv_p.pcr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) srv_p.icr = srv_p.pcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d mcr = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) srv_p.pcr, srv_p.mcr);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) ia_open_abr_vc(iadev, &srv_p, vcc, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) if (iadev->phy_type & FE_25MBIT_PHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) printk("IA: CBR not support\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) IF_CBR(printk("PCR is not available\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) vc->type = CBR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) vc->status = CRC_APPEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) printk("iadev: Non UBR, ABR and CBR traffic not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) IF_EVENT(printk("ia open_tx returning \n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) static int tx_init(struct atm_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) IADEV *iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) struct tx_buf_desc *buf_desc_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) unsigned int tx_pkt_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) void *dle_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) u_short tcq_st_adr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) u_short *tcq_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) u_short prq_st_adr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) u_short *prq_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) struct main_vc *vc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) struct ext_vc *evc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) u_short tmp16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) u32 vcsize_sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) iadev = INPH_IA_DEV(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) spin_lock_init(&iadev->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) IF_INIT(printk("Tx MASK REG: 0x%0x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) readw(iadev->seg_reg+SEG_MASK_REG));)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) /* Allocate 4k (boundary aligned) bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) &iadev->tx_dle_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) if (!dle_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) iadev->tx_dle_q.start = (struct dle*)dle_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) iadev->tx_dle_q.read = iadev->tx_dle_q.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) iadev->tx_dle_q.write = iadev->tx_dle_q.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) /* write the upper 20 bits of the start address to tx list address register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) writel(iadev->tx_dle_dma & 0xfffff000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) iadev->dma + IPHASE5575_TX_LIST_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) writew(0, iadev->seg_reg+MODE_REG_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) Transmit side control memory map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) --------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) Buffer descr 0x0000 (128 - 4K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) Commn queues 0x1000 Transmit comp, Packet ready(0x1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) (512 - 1K) each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) TCQ - 4K, PRQ - 5K
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) CBR Table 0x1800 (as needed) - 6K
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) UBR Table 0x3000 (1K - 4K) - 12K
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) UBR Wait queue 0x4000 (1K - 4K) - 16K
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) ABR sched 0x5000 and ABR wait queue (1K - 2K) each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) ABR Tbl - 20K, ABR Wq - 22K
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) extended VC 0x6000 (1K - 8K) - 24K
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) VC Table 0x8000 (1K - 32K) - 32K
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) and Wait q, which can be allotted later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) /* Buffer Descriptor Table Base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) /* initialize each entry in the buffer descriptor table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) buf_desc_ptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) tx_pkt_start = TX_PACKET_RAM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) for(i=1; i<=iadev->num_tx_desc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) buf_desc_ptr->desc_mode = AAL5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) buf_desc_ptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) tx_pkt_start += iadev->tx_buf_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) iadev->tx_buf = kmalloc_array(iadev->num_tx_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) sizeof(*iadev->tx_buf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) if (!iadev->tx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) goto err_free_dle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) for (i= 0; i< iadev->num_tx_desc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) struct cpcs_trailer *cpcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) if(!cpcs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) goto err_free_tx_bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) iadev->tx_buf[i].cpcs = cpcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) iadev->tx_buf[i].dma_addr = dma_map_single(&iadev->pci->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) cpcs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) sizeof(*cpcs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) iadev->desc_tbl = kmalloc_array(iadev->num_tx_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) sizeof(*iadev->desc_tbl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) if (!iadev->desc_tbl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) goto err_free_all_tx_bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) /* Communication Queues base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) i = TX_COMP_Q * iadev->memSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) /* Transmit Complete Queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) writew(i, iadev->seg_reg+TCQ_ST_ADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) writew(i, iadev->seg_reg+TCQ_RD_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) iadev->seg_reg+TCQ_ED_ADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) /* Fill the TCQ with all the free descriptors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) for(i=1; i<=iadev->num_tx_desc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) *tcq_start = (u_short)i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) tcq_start++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) /* Packet Ready Queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) i = PKT_RDY_Q * iadev->memSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) writew(i, iadev->seg_reg+PRQ_ST_ADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) iadev->seg_reg+PRQ_ED_ADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) writew(i, iadev->seg_reg+PRQ_RD_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) writew(i, iadev->seg_reg+PRQ_WR_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) /* Load local copy of PRQ and TCQ ptrs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) /* Just for safety initializing the queue to have desc 1 always */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) /* Fill the PRQ with all the free descriptors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) for(i=1; i<=iadev->num_tx_desc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) *prq_start = (u_short)0; /* desc 1 in all entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) prq_start++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) /* CBR Table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) IF_INIT(printk("Start CBR Init\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) #if 1 /* for 1K VC board, CBR_PTR_BASE is 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) writew(0,iadev->seg_reg+CBR_PTR_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) #else /* Charlie's logic is wrong ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) IF_INIT(printk("value in register = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) readw(iadev->seg_reg+CBR_PTR_BASE));)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) readw(iadev->seg_reg+CBR_TAB_BEG));)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) writew(tmp16, iadev->seg_reg+CBR_TAB_END);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) readw(iadev->seg_reg+CBR_TAB_END+1));)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) /* Initialize the CBR Schedualing Table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 0, iadev->num_vc*6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) iadev->CbrEntryPt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) iadev->NumEnabledCBR = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) /* UBR scheduling Table and wait queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) /* initialize all bytes of UBR scheduler table and wait queue to 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) - SCHEDSZ is 1K (# of entries).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) - UBR Table size is 4K
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) - UBR wait queue is 4K
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) since the table and wait queues are contiguous, all the bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) can be initialized by one memeset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) vcsize_sel = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) i = 8*1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) while (i != iadev->num_vc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) i /= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) vcsize_sel++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) i = MAIN_VC_TABLE * iadev->memSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) i = EXT_VC_TABLE * iadev->memSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) i = UBR_SCHED_TABLE * iadev->memSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) writew((i & 0xffff) >> 11, iadev->seg_reg+UBR_SBPTR_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) i = UBR_WAIT_Q * iadev->memSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) writew((i >> 7) & 0xffff, iadev->seg_reg+UBRWQ_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 0, iadev->num_vc*8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) /* initialize all bytes of ABR scheduler table and wait queue to 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) - SCHEDSZ is 1K (# of entries).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) - ABR Table size is 2K
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) - ABR wait queue is 2K
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) since the table and wait queues are contiguous, all the bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) can be initialized by one memeset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) i = ABR_SCHED_TABLE * iadev->memSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) i = ABR_WAIT_Q * iadev->memSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) i = ABR_SCHED_TABLE*iadev->memSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) iadev->testTable = kmalloc_array(iadev->num_vc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) sizeof(*iadev->testTable),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) if (!iadev->testTable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) printk("Get freepage failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) goto err_free_desc_tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) for(i=0; i<iadev->num_vc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) memset((caddr_t)vc, 0, sizeof(*vc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) memset((caddr_t)evc, 0, sizeof(*evc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) if (!iadev->testTable[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) goto err_free_test_tables;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) iadev->testTable[i]->lastTime = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) iadev->testTable[i]->fract = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) iadev->testTable[i]->vc_status = VC_UBR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) vc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) evc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) /* Other Initialization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) /* Max Rate Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) if (iadev->phy_type & FE_25MBIT_PHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) writew(RATE25, iadev->seg_reg+MAXRATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) /* Set Idle Header Reigisters to be sure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) writew(0, iadev->seg_reg+IDLEHEADHI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) writew(0, iadev->seg_reg+IDLEHEADLO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) /* Program ABR UBR Priority Register as PRI_ABR_UBR_EQUAL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) iadev->close_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) init_waitqueue_head(&iadev->close_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) init_waitqueue_head(&iadev->timeout_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) skb_queue_head_init(&iadev->tx_dma_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) ia_init_rtn_q(&iadev->tx_return_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) /* RM Cell Protocol ID and Message Type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) skb_queue_head_init (&iadev->tx_backlog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) /* Mode Register 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) /* Mode Register 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) /* Interrupt Status Register - read to clear */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) readw(iadev->seg_reg+SEG_INTR_STATUS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) iadev->tx_pkt_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) iadev->rate_limit = iadev->LineRate / 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) err_free_test_tables:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) while (--i >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) kfree(iadev->testTable[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) kfree(iadev->testTable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) err_free_desc_tbl:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) kfree(iadev->desc_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) err_free_all_tx_bufs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) i = iadev->num_tx_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) err_free_tx_bufs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) while (--i >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) sizeof(*desc->cpcs), DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) kfree(desc->cpcs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) kfree(iadev->tx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) err_free_dle:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) iadev->tx_dle_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) static irqreturn_t ia_int(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) struct atm_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) IADEV *iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) unsigned int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) int handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) dev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) iadev = INPH_IA_DEV(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) handled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) if (status & STAT_REASSINT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) /* do something */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) rx_intr(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) if (status & STAT_DLERINT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) /* Clear this bit by writing a 1 to it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) writel(STAT_DLERINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) rx_dle_intr(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) if (status & STAT_SEGINT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) /* do something */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) IF_EVENT(printk("IA: tx_intr \n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) tx_intr(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) if (status & STAT_DLETINT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) tx_dle_intr(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) if (status & STAT_FEINT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) ia_frontend_intr(iadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) return IRQ_RETVAL(handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) /*----------------------------- entries --------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) static int get_esi(struct atm_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) IADEV *iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) u32 mac1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) u16 mac2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) iadev = INPH_IA_DEV(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) mac1 = cpu_to_be32(le32_to_cpu(readl(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) iadev->reg+IPHASE5575_MAC1)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) for (i=0; i<MAC1_LEN; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) for (i=0; i<MAC2_LEN; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) static int reset_sar(struct atm_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) IADEV *iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) int i, error = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) unsigned int pci[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) iadev = INPH_IA_DEV(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) for(i=0; i<64; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) if ((error = pci_read_config_dword(iadev->pci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) writel(0, iadev->reg+IPHASE5575_EXT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) for(i=0; i<64; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) if ((error = pci_write_config_dword(iadev->pci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) static int ia_init(struct atm_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) IADEV *iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) unsigned long real_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) unsigned short command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) int error, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) /* The device has been identified and registered. Now we read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) necessary configuration info like memory base address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) interrupt number etc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) IF_INIT(printk(">ia_init\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) dev->ci_range.vpi_bits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) dev->ci_range.vci_bits = NR_VCI_LD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) iadev = INPH_IA_DEV(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) real_base = pci_resource_start (iadev->pci, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) iadev->irq = iadev->pci->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) dev->number,error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) dev->number, iadev->pci->revision, real_base, iadev->irq);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) /* find mapping size of board */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) if (iadev->pci_map_size == 0x100000){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) iadev->num_vc = 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) dev->ci_range.vci_bits = NR_VCI_4K_LD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) iadev->memSize = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) else if (iadev->pci_map_size == 0x40000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) iadev->num_vc = 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) iadev->memSize = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) /* enable bus mastering */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) pci_set_master(iadev->pci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) * Delay at least 1us before doing any mem accesses (how 'bout 10?)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) /* mapping the physical address to a virtual address in address space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) base = ioremap(real_base,iadev->pci_map_size); /* ioremap is not resolved ??? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) if (!base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) dev->number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) dev->number, iadev->pci->revision, base, iadev->irq);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) /* filling the iphase dev structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) iadev->mem = iadev->pci_map_size /2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) iadev->real_base = real_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) iadev->base = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) /* Bus Interface Control Registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) iadev->reg = base + REG_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) /* Segmentation Control Registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) iadev->seg_reg = base + SEG_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) /* Reassembly Control Registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) iadev->reass_reg = base + REASS_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) /* Front end/ DMA control registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) iadev->phy = base + PHY_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) iadev->dma = base + PHY_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) /* RAM - Segmentation RAm and Reassembly RAM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) iadev->ram = base + ACTUAL_RAM_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) /* lets print out the above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) iadev->reg,iadev->seg_reg,iadev->reass_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) iadev->phy, iadev->ram, iadev->seg_ram,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) iadev->reass_ram);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) /* lets try reading the MAC address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) error = get_esi(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) iounmap(iadev->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) printk("IA: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) for (i=0; i < ESI_LEN; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) printk("%s%02X",i ? "-" : "",dev->esi[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) printk("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) /* reset SAR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) if (reset_sar(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) iounmap(iadev->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) printk("IA: reset SAR fail, please try again\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) static void ia_update_stats(IADEV *iadev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) if (!iadev->carrier_detect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) iadev->drop_rxpkt += readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) static void ia_led_timer(struct timer_list *unused) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) u_char i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) static u32 ctrl_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) for (i = 0; i < iadev_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) if (ia_dev[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) if (blinking[i] == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) blinking[i]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) ctrl_reg &= (~CTRL_LED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) ia_update_stats(ia_dev[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) blinking[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) ctrl_reg |= CTRL_LED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) if (ia_dev[i]->close_pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) wake_up(&ia_dev[i]->close_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) ia_tx_poll(ia_dev[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) mod_timer(&ia_timer, jiffies + HZ / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) static void ia_phy_put(struct atm_dev *dev, unsigned char value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) writel(value, INPH_IA_DEV(dev)->phy+addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) return readl(INPH_IA_DEV(dev)->phy+addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) static void ia_free_tx(IADEV *iadev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) kfree(iadev->desc_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) for (i = 0; i < iadev->num_vc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) kfree(iadev->testTable[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) kfree(iadev->testTable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) for (i = 0; i < iadev->num_tx_desc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) sizeof(*desc->cpcs), DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) kfree(desc->cpcs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) kfree(iadev->tx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) iadev->tx_dle_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) static void ia_free_rx(IADEV *iadev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) kfree(iadev->rx_open);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) iadev->rx_dle_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) static int ia_start(struct atm_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) IADEV *iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) unsigned char phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) u32 ctrl_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) IF_EVENT(printk(">ia_start\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) iadev = INPH_IA_DEV(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) dev->number, iadev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) error = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) /* @@@ should release IRQ on error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) /* enabling memory + master */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) if ((error = pci_write_config_word(iadev->pci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) PCI_COMMAND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) "master (0x%x)\n",dev->number, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) goto err_free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) /* Maybe we should reset the front end, initialize Bus Interface Control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) Registers and see. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) IF_INIT(printk("Bus ctrl reg: %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) | CTRL_B8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) | CTRL_B16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) | CTRL_B32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) | CTRL_B48
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) | CTRL_B64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) | CTRL_B128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) | CTRL_ERRMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) | CTRL_DLETMASK /* shud be removed l8r */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) | CTRL_DLERMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) | CTRL_SEGMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) | CTRL_REASSMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) | CTRL_FEMASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) | CTRL_CSPREEMPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) printk("Bus status reg after init: %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) ia_hw_type(iadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) error = tx_init(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) goto err_free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) error = rx_init(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) goto err_free_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) phy = 0; /* resolve compiler complaint */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) IF_INIT (
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) if ((phy=ia_phy_get(dev,0)) == 0x30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) printk("IA: pm5346,rev.%d\n",phy&0x0f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) printk("IA: utopia,rev.%0x\n",phy);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) if (iadev->phy_type & FE_25MBIT_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) ia_mb25_init(iadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) ia_suni_pm7345_init(iadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) error = suni_init(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) goto err_free_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) if (dev->phy->start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) error = dev->phy->start(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) goto err_free_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) /* Get iadev->carrier_detect status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) ia_frontend_intr(iadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) err_free_rx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) ia_free_rx(iadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) err_free_tx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) ia_free_tx(iadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) err_free_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) free_irq(iadev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) static void ia_close(struct atm_vcc *vcc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) DEFINE_WAIT(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) u16 *vc_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) IADEV *iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) struct ia_vcc *ia_vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) struct sk_buff *skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) unsigned long closetime, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) iadev = INPH_IA_DEV(vcc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) ia_vcc = INPH_IA_VCC(vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) if (!ia_vcc) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d vci = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) ia_vcc->vc_desc_cnt,vcc->vci);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) clear_bit(ATM_VF_READY,&vcc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) skb_queue_head_init (&tmp_tx_backlog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) skb_queue_head_init (&tmp_vcc_backlog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) if (vcc->qos.txtp.traffic_class != ATM_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) iadev->close_pending++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) schedule_timeout(msecs_to_jiffies(500));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) finish_wait(&iadev->timeout_wait, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) spin_lock_irqsave(&iadev->tx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) while((skb = skb_dequeue(&iadev->tx_backlog))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) if (ATM_SKB(skb)->vcc == vcc){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) if (vcc->pop) vcc->pop(vcc, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) else dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) skb_queue_tail(&tmp_tx_backlog, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) while((skb = skb_dequeue(&tmp_tx_backlog)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) skb_queue_tail(&iadev->tx_backlog, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) closetime = 300000 / ia_vcc->pcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) if (closetime == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) closetime = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) spin_unlock_irqrestore(&iadev->tx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) spin_lock_irqsave(&iadev->tx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) iadev->close_pending--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) iadev->testTable[vcc->vci]->lastTime = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) iadev->testTable[vcc->vci]->fract = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) iadev->testTable[vcc->vci]->vc_status = VC_UBR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) if (vcc->qos.txtp.traffic_class == ATM_ABR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) if (vcc->qos.txtp.min_pcr > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) if (vcc->qos.txtp.traffic_class == ATM_CBR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) ia_vcc = INPH_IA_VCC(vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) ia_cbrVc_close (vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) spin_unlock_irqrestore(&iadev->tx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) // reset reass table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) vc_table += vcc->vci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) *vc_table = NO_AAL5_PKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) // reset vc table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) vc_table += vcc->vci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) *vc_table = (vcc->vci << 6) | 15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) struct abr_vc_table __iomem *abr_vc_table =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) abr_vc_table += vcc->vci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) abr_vc_table->rdf = 0x0003;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) abr_vc_table->air = 0x5eb1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) // Drain the packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) rx_dle_intr(vcc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) iadev->rx_open[vcc->vci] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) kfree(INPH_IA_VCC(vcc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) ia_vcc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) vcc->dev_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) clear_bit(ATM_VF_ADDR,&vcc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) static int ia_open(struct atm_vcc *vcc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) struct ia_vcc *ia_vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) IF_EVENT(printk("ia: not partially allocated resources\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) vcc->dev_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) if (vcc->vci != ATM_VPI_UNSPEC && vcc->vpi != ATM_VCI_UNSPEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) IF_EVENT(printk("iphase open: unspec part\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) set_bit(ATM_VF_ADDR,&vcc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) if (vcc->qos.aal != ATM_AAL5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) vcc->dev->number, vcc->vpi, vcc->vci);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) /* Device dependent initialization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) if (!ia_vcc) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) vcc->dev_data = ia_vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) if ((error = open_rx(vcc)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) IF_EVENT(printk("iadev: error in open_rx, closing\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) ia_close(vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) if ((error = open_tx(vcc)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) IF_EVENT(printk("iadev: error in open_tx, closing\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) ia_close(vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) set_bit(ATM_VF_READY,&vcc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) static u8 first = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) if (first) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) ia_timer.expires = jiffies + 3*HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) add_timer(&ia_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) first = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) IF_EVENT(printk("ia open returning\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) IF_EVENT(printk(">ia_change_qos\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) IA_CMDBUF ia_cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) IADEV *iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) int i, board;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) u16 __user *tmps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) IF_EVENT(printk(">ia_ioctl\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) if (cmd != IA_CMD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) if (!dev->phy->ioctl) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) return dev->phy->ioctl(dev,cmd,arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) board = ia_cmds.status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) if ((board < 0) || (board > iadev_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) board = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) board = array_index_nospec(board, iadev_count + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) iadev = ia_dev[board];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) switch (ia_cmds.cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) case MEMDUMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) switch (ia_cmds.sub_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) case MEMDUMP_SEGREG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) if (!capable(CAP_NET_ADMIN)) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) tmps = (u16 __user *)ia_cmds.buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) for(i=0; i<0x80; i+=2, tmps++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) ia_cmds.status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) ia_cmds.len = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) case MEMDUMP_REASSREG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) if (!capable(CAP_NET_ADMIN)) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) tmps = (u16 __user *)ia_cmds.buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) for(i=0; i<0x80; i+=2, tmps++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) ia_cmds.status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) ia_cmds.len = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) case MEMDUMP_FFL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) ia_regs_t *regs_local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) ffredn_t *ffL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) rfredn_t *rfL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) if (!capable(CAP_NET_ADMIN)) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) if (!regs_local) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) ffL = ®s_local->ffredn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) rfL = ®s_local->rfredn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) /* Copy real rfred registers into the local copy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) for (i=0; i<(sizeof (rfredn_t))/4; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) /* Copy real ffred registers into the local copy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) for (i=0; i<(sizeof (ffredn_t))/4; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) kfree(regs_local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) kfree(regs_local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) printk("Board %d registers dumped\n", board);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) ia_cmds.status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) case READ_REG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) if (!capable(CAP_NET_ADMIN)) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) desc_dbg(iadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) ia_cmds.status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) case 0x6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) ia_cmds.status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) printk("skb = 0x%p\n", skb_peek(&iadev->tx_backlog));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) printk("rtn_q: 0x%p\n",ia_deque_rtn_q(&iadev->tx_return_q));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) case 0x8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) struct k_sonet_stats *stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) stats = &PRIV(_ia_dev[board])->sonet_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) printk("section_bip: %d\n", atomic_read(&stats->section_bip));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) printk("line_bip : %d\n", atomic_read(&stats->line_bip));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) printk("path_bip : %d\n", atomic_read(&stats->path_bip));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) printk("line_febe : %d\n", atomic_read(&stats->line_febe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) printk("path_febe : %d\n", atomic_read(&stats->path_febe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) ia_cmds.status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) case 0x9:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) if (!capable(CAP_NET_ADMIN)) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) for (i = 1; i <= iadev->num_rx_desc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) free_desc(_ia_dev[board], i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) iadev->reass_reg+REASS_MASK_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) iadev->rxing = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) ia_cmds.status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) case 0xb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) if (!capable(CAP_NET_ADMIN)) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) ia_frontend_intr(iadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) case 0xa:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) if (!capable(CAP_NET_ADMIN)) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) ia_cmds.status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) IADebugFlag = ia_cmds.maddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) printk("New debug option loaded\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) ia_cmds.status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) IADEV *iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) struct dle *wr_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) struct tx_buf_desc __iomem *buf_desc_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) int desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) int comp_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) int total_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) struct cpcs_trailer *trailer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) struct ia_vcc *iavcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) iadev = INPH_IA_DEV(vcc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) iavcc = INPH_IA_VCC(vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) if (!iavcc->txing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) printk("discard packet on closed VC\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) if (vcc->pop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) vcc->pop(vcc, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) if (skb->len > iadev->tx_buf_sz - 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) printk("Transmit size over tx buffer size\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) if (vcc->pop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) vcc->pop(vcc, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) if ((unsigned long)skb->data & 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) printk("Misaligned SKB\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) if (vcc->pop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) vcc->pop(vcc, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) /* Get a descriptor number from our free descriptor queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) We get the descr number from the TCQ now, since I am using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) the TCQ as a free buffer queue. Initially TCQ will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) initialized with all the descriptors and is hence, full.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) desc = get_desc (iadev, iavcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) if (desc == 0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) comp_code = desc >> 13;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) desc &= 0x1fff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) if ((desc == 0) || (desc > iadev->num_tx_desc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) atomic_inc(&vcc->stats->tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) if (vcc->pop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) vcc->pop(vcc, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) return 0; /* return SUCCESS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) if (comp_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) desc, comp_code);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) /* remember the desc and vcc mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) iavcc->vc_desc_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) iadev->desc_tbl[desc-1].iavcc = iavcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) iadev->desc_tbl[desc-1].txskb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) IA_SKB_STATE(skb) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) iadev->ffL.tcq_rd += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) iadev->ffL.tcq_rd = iadev->ffL.tcq_st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) /* Put the descriptor number in the packet ready queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) and put the updated write pointer in the DLE field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) iadev->ffL.prq_wr += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) iadev->ffL.prq_wr = iadev->ffL.prq_st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) /* Figure out the exact length of the packet and padding required to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) make it aligned on a 48 byte boundary. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) total_len = skb->len + sizeof(struct cpcs_trailer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) total_len = ((total_len + 47) / 48) * 48;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) /* Put the packet in a tx buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) trailer = iadev->tx_buf[desc-1].cpcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) skb, skb->data, skb->len, desc);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) trailer->control = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) /*big endian*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) trailer->crc32 = 0; /* not needed - dummy bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) /* Display the packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) skb->len, tcnter++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) xdump(skb->data, skb->len, "TX: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) printk("\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) /* Build the buffer descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) buf_desc_ptr += desc; /* points to the corresponding entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) /* Huh ? p.115 of users guide describes this as a read-only register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) buf_desc_ptr->vc_index = vcc->vci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) buf_desc_ptr->bytes = total_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) if (vcc->qos.txtp.traffic_class == ATM_ABR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) clear_lockup (vcc, iadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) /* Build the DLE structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) wr_ptr = iadev->tx_dle_q.write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) skb->len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) buf_desc_ptr->buf_start_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) /* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) wr_ptr->bytes = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) if ((wr_ptr->bytes >> 2) == 0xb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) wr_ptr->bytes = 0x30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) wr_ptr->mode = TX_DLE_PSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) wr_ptr->prq_wr_ptr_data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) /* end is not to be used for the DLE q */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) if (++wr_ptr == iadev->tx_dle_q.end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) wr_ptr = iadev->tx_dle_q.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) /* Build trailer dle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) wr_ptr->bytes = sizeof(struct cpcs_trailer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) wr_ptr->mode = DMA_INT_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) /* end is not to be used for the DLE q */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) if (++wr_ptr == iadev->tx_dle_q.end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) wr_ptr = iadev->tx_dle_q.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) iadev->tx_dle_q.write = wr_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) ATM_DESC(skb) = vcc->vci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) skb_queue_tail(&iadev->tx_dma_q, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) atomic_inc(&vcc->stats->tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) iadev->tx_pkt_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) /* Increment transaction counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) /* add flow control logic */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) if (atomic_read(&vcc->stats->tx) % 20 == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) if (iavcc->vc_desc_cnt > 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) vcc->tx_quota = vcc->tx_quota * 3 / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) iavcc->flow_inc = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) iavcc->saved_tx_quota = vcc->tx_quota;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) printk("Tx2: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) iavcc->flow_inc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) IF_TX(printk("ia send done\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) IADEV *iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) iadev = INPH_IA_DEV(vcc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) printk(KERN_CRIT "null skb in ia_send\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) else dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) spin_lock_irqsave(&iadev->tx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) if (!test_bit(ATM_VF_READY,&vcc->flags)){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) spin_unlock_irqrestore(&iadev->tx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) ATM_SKB(skb)->vcc = vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) if (skb_peek(&iadev->tx_backlog)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) skb_queue_tail(&iadev->tx_backlog, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) if (ia_pkt_tx (vcc, skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) skb_queue_tail(&iadev->tx_backlog, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) spin_unlock_irqrestore(&iadev->tx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) int left = *pos, n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) char *tmpPtr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) IADEV *iadev = INPH_IA_DEV(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) if(!left--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) if (iadev->phy_type == FE_25MBIT_PHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) n = sprintf(page, " Board Type : Iphase5525-1KVC-128K\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) return n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) if (iadev->phy_type == FE_DS3_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) n = sprintf(page, " Board Type : Iphase-ATM-DS3");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) else if (iadev->phy_type == FE_E3_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) n = sprintf(page, " Board Type : Iphase-ATM-E3");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) else if (iadev->phy_type == FE_UTP_OPTION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) n = sprintf(page, " Board Type : Iphase-ATM-UTP155");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) n = sprintf(page, " Board Type : Iphase-ATM-OC3");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) tmpPtr = page + n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) if (iadev->pci_map_size == 0x40000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) n += sprintf(tmpPtr, "-1KVC-");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) n += sprintf(tmpPtr, "-4KVC-");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) tmpPtr = page + n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) n += sprintf(tmpPtr, "1M \n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) n += sprintf(tmpPtr, "512K\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) n += sprintf(tmpPtr, "128K\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) return n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) if (!left) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) return sprintf(page, " Number of Tx Buffer: %u\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) " Size of Tx Buffer : %u\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) " Number of Rx Buffer: %u\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) " Size of Rx Buffer : %u\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) " Packets Received : %u\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) " Packets Transmitted: %u\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) " Cells Received : %u\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) " Cells Transmitted : %u\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) " Board Dropped Cells: %u\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) " Board Dropped Pkts : %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) iadev->num_tx_desc, iadev->tx_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) iadev->num_rx_desc, iadev->rx_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) iadev->rx_pkt_cnt, iadev->tx_pkt_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) iadev->rx_cell_cnt, iadev->tx_cell_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) iadev->drop_rxcell, iadev->drop_rxpkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) static const struct atmdev_ops ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) .open = ia_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) .close = ia_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) .ioctl = ia_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) .send = ia_send,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) .phy_put = ia_phy_put,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) .phy_get = ia_phy_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) .change_qos = ia_change_qos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) .proc_read = ia_proc_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) static int ia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) struct atm_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) IADEV *iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) if (!iadev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) iadev->pci = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) if (pci_enable_device(pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) goto err_out_free_iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) goto err_out_disable_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) dev->dev_data = iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) iadev->LineRate);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) pci_set_drvdata(pdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) ia_dev[iadev_count] = iadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) _ia_dev[iadev_count] = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) iadev_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) if (ia_init(dev) || ia_start(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) IF_INIT(printk("IA register failed!\n");)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) iadev_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) ia_dev[iadev_count] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) _ia_dev[iadev_count] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) goto err_out_deregister_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) iadev->next_board = ia_boards;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) ia_boards = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) err_out_deregister_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) atm_dev_deregister(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) err_out_disable_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) err_out_free_iadev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) kfree(iadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) static void ia_remove_one(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) struct atm_dev *dev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) IADEV *iadev = INPH_IA_DEV(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) /* Disable phy interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) SUNI_RSOP_CIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) if (dev->phy && dev->phy->stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) dev->phy->stop(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) /* De-register device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) free_irq(iadev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) iadev_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) ia_dev[iadev_count] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) _ia_dev[iadev_count] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) atm_dev_deregister(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) iounmap(iadev->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) ia_free_rx(iadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) ia_free_tx(iadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) kfree(iadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) static const struct pci_device_id ia_pci_tbl[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) { 0,}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) static struct pci_driver ia_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) .name = DEV_LABEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) .id_table = ia_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) .probe = ia_init_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) .remove = ia_remove_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) static int __init ia_module_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) ret = pci_register_driver(&ia_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) if (ret >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) ia_timer.expires = jiffies + 3*HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) add_timer(&ia_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) printk(KERN_ERR DEV_LABEL ": no adapter found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) static void __exit ia_module_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) pci_unregister_driver(&ia_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) del_timer_sync(&ia_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) module_init(ia_module_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) module_exit(ia_module_exit);