^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) Madge Horizon ATM Adapter driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) Copyright (C) 1995-1999 Madge Networks Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) IMPORTANT NOTE: Madge Networks no longer makes the adapters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) supported by this driver and makes no commitment to maintain it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/sched/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/atm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/atmdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/sonet.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/time.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/uio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/ioport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/wait.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/atomic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <asm/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <asm/byteorder.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include "horizon.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define maintainer_string "Giuliano Procida at Madge Networks <gprocida@madge.com>"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #define description_string "Madge ATM Horizon [Ultra] driver"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define version_string "1.2.1"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) static inline void __init show_version (void) {
  printk (KERN_INFO "%s version %s\n", description_string, version_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) CREDITS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) Driver and documentation by:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) Chris Aston Madge Networks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) Giuliano Procida Madge Networks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) Simon Benham Madge Networks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) Simon Johnson Madge Networks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) Various Others Madge Networks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) Some inspiration taken from other drivers by:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) Alexandru Cucos UTBv
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) Kari Mettinen University of Helsinki
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) Werner Almesberger EPFL LRC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) Theory of Operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) I Hardware, detection, initialisation and shutdown.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) 1. Supported Hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) This driver should handle all variants of the PCI Madge ATM adapters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) with the Horizon chipset. These are all PCI cards supporting PIO, BM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) DMA and a form of MMIO (registers only, not internal RAM).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) The driver is only known to work with SONET and UTP Horizon Ultra
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) cards at 155Mb/s. However, code is in place to deal with both the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) original Horizon and 25Mb/s operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) There are two revisions of the Horizon ASIC: the original and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) Ultra. Details of hardware bugs are in section III.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) The ASIC version can be distinguished by chip markings but is NOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) indicated by the PCI revision (all adapters seem to have PCI rev 1).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) I believe that:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) Horizon => Collage 25 PCI Adapter (UTP and STP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) Horizon Ultra => Collage 155 PCI Client (UTP or SONET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) Ambassador x => Collage 155 PCI Server (completely different)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) Horizon (25Mb/s) is fitted with UTP and STP connectors. It seems to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) have a Madge B154 plus glue logic serializer. I have also found a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) really ancient version of this with slightly different glue. It
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) comes with the revision 0 (140-025-01) ASIC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) Horizon Ultra (155Mb/s) is fitted with either a Pulse Medialink
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) output (UTP) or an HP HFBR 5205 output (SONET). It has either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) Madge's SAMBA framer or a SUNI-lite device (early versions). It
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) comes with the revision 1 (140-027-01) ASIC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 2. Detection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) All Horizon-based cards present with the same PCI Vendor and Device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) IDs. The standard Linux 2.2 PCI API is used to locate any cards and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) to enable bus-mastering (with appropriate latency).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) ATM_LAYER_STATUS in the control register distinguishes between the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) two possible physical layers (25 and 155). It is not clear whether
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) the 155 cards can also operate at 25Mbps. We rely on the fact that a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) card operates at 155 if and only if it has the newer Horizon Ultra
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) ASIC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) For 155 cards the two possible framers are probed for and then set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) up for loop-timing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 3. Initialisation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) The card is reset and then put into a known state. The physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) layer is configured for normal operation at the appropriate speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) in the case of the 155 cards, the framer is initialised with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) line-based timing; the internal RAM is zeroed and the allocation of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) buffers for RX and TX is made; the Burnt In Address is read and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) copied to the ATM ESI; various policy settings for RX (VPI bits,
  unknown VCs, OAM cells) are made. Ideally all policy items should be
  configurable at module load (if not actually on demand); however,
  only the VPI vs. VCI bit allocation can be specified at insmod.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 4. Shutdown
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
  This is in response to module_cleanup. No VCs are in use and the card
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) should be idle; it is reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) II Driver software (as it should be)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 0. Traffic Parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) The traffic classes (not an enumeration) are currently: ATM_NONE (no
  traffic), ATM_UBR, ATM_CBR, ATM_VBR, ATM_ABR and ATM_ANYCLASS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) (compatible with everything). Together with (perhaps only some of)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) the following items they make up the traffic specification.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) struct atm_trafprm {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) unsigned char traffic_class; traffic class (ATM_UBR, ...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) int max_pcr; maximum PCR in cells per second
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) int pcr; desired PCR in cells per second
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) int min_pcr; minimum PCR in cells per second
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) int max_cdv; maximum CDV in microseconds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) int max_sdu; maximum SDU in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)
  Note that these denote bandwidth available, not bandwidth used; the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) possibilities according to ATMF are:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
  Real Time (cdv and max CTD given)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) CBR(pcr) pcr bandwidth always available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) rtVBR(pcr,scr,mbs) scr bandwidth always available, up to pcr at mbs too
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) Non Real Time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) nrtVBR(pcr,scr,mbs) scr bandwidth always available, up to pcr at mbs too
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) UBR()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) ABR(mcr,pcr) mcr bandwidth always available, up to pcr (depending) too
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) mbs is max burst size (bucket)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) pcr and scr have associated cdvt values
  mcr is like scr but has no cdvt
  cdvt may differ at each hop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)
  Some of the above items are QoS items (as opposed to traffic
  parameters). We have nothing to do with QoS. All except ABR can have
  their traffic parameters converted to GCRA parameters. The GCRA may
  be implemented as a (real-number) leaky bucket. The GCRA can be used
  in complicated ways by switches and in simpler ways by end-stations.
  It can be used both to filter incoming cells and to shape outgoing
  cells.
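
  As a purely illustrative sketch (not code used by this driver), the
  virtual scheduling form of the GCRA, with increment T and limit tau,
  might look like the following, where tat carries the theoretical
  arrival time between calls:

  static int gcra_conforming (double t, double * tat, double T, double tau) {
    if (t < *tat - tau)
      return 0;                          /* non-conforming cell */
    *tat = (t > *tat ? t : *tat) + T;    /* conforming: advance TAT */
    return 1;
  }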
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) ATM Linux actually supports:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) ATM_NONE() (no traffic in this direction)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) ATM_UBR(max_frame_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) ATM_CBR(max/min_pcr, max_cdv, max_frame_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)
  Either 0 or ATM_MAX_PCR is used to indicate the maximum available PCR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) A traffic specification consists of the AAL type and separate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) traffic specifications for either direction. In ATM Linux it is:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) struct atm_qos {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) struct atm_trafprm txtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) struct atm_trafprm rxtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) unsigned char aal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) };
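
  For example (purely illustrative, not code from this driver), a CBR
  AAL5 connection at 10000 cells per second in each direction might be
  requested with something like:

  struct atm_qos qos;

  memset (&qos, 0, sizeof(qos));
  qos.aal = ATM_AAL5;
  qos.txtp.traffic_class = ATM_CBR;
  qos.txtp.max_pcr = 10000;    /* cells per second */
  qos.txtp.max_sdu = 9000;     /* bytes */
  qos.rxtp = qos.txtp;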
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) AAL types are:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) ATM_NO_AAL AAL not specified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) ATM_AAL0 "raw" ATM cells
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) ATM_AAL1 AAL1 (CBR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) ATM_AAL2 AAL2 (VBR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) ATM_AAL34 AAL3/4 (data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) ATM_AAL5 AAL5 (data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) ATM_SAAL signaling AAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) The Horizon has support for AAL frame types: 0, 3/4 and 5. However,
  it does not implement AAL 3/4 SAR and it has a different notion of
  a "raw cell" from ATM Linux's (48 bytes vs. 52 bytes), so neither is
  supported by this driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) The Horizon has limited support for ABR (including UBR), VBR and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) CBR. Each TX channel has a bucket (containing up to 31 cell units)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) and two timers (PCR and SCR) associated with it that can be used to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) govern cell emissions and host notification (in the case of ABR this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) is presumably so that RM cells may be emitted at appropriate times).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) The timers may either be disabled or may be set to any of 240 values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) (determined by the clock crystal, a fixed (?) per-device divider, a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) configurable divider and a configurable timer preload value).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) At the moment only UBR and CBR are supported by the driver. VBR will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) be supported as soon as ATM for Linux supports it. ABR support is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) very unlikely as RM cell handling is completely up to the driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 1. TX (TX channel setup and TX transfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) The TX half of the driver owns the TX Horizon registers. The TX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) component in the IRQ handler is the BM completion handler. This can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) only be entered when tx_busy is true (enforced by hardware). The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) other TX component can only be entered when tx_busy is false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) (enforced by driver). So TX is single-threaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) Apart from a minor optimisation to not re-select the last channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) the TX send component works as follows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
  Atomically test and set tx_busy until we succeed; we should implement
  some sort of timeout so that tx_busy can never be stuck at true.
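
  (One hypothetical shape for that, assuming tx_busy is a bit in
  dev->flags like the ultra flag used elsewhere in this file:

  unsigned long timeout = jiffies + 10 * HZ;

  while (test_and_set_bit (tx_busy, &dev->flags)) {
    if (time_after (jiffies, timeout))
      return -EBUSY;
    cpu_relax ();
  }
  )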
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) If no TX channel is set up for this VC we wait for an idle one (if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) necessary) and set it up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) At this point we have a TX channel ready for use. We wait for enough
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) buffers to become available then start a TX transmit (set the TX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) descriptor, schedule transfer, exit).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) The IRQ component handles TX completion (stats, free buffer, tx_busy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) unset, exit). We also re-schedule further transfers for the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) frame if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) TX setup in more detail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) TX open is a nop, the relevant information is held in the hrz_vcc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) (vcc->dev_data) structure and is "cached" on the card.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) TX close gets the TX lock and clears the channel from the "cache".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 2. RX (Data Available and RX transfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) The RX half of the driver owns the RX registers. There are two RX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) components in the IRQ handler: the data available handler deals with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) fresh data that has arrived on the card, the BM completion handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) is very similar to the TX completion handler. The data available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) handler grabs the rx_lock and it is only released once the data has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) been discarded or completely transferred to the host. The BM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) completion handler only runs when the lock is held; the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) available handler is locked out over the same period.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) Data available on the card triggers an interrupt. If the data is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) suitable for our existing RX channels or we cannot allocate a buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) it is flushed. Otherwise an RX receive is scheduled. Multiple RX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) transfers may be scheduled for the same frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) RX setup in more detail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) RX open...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) RX close...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) III Hardware Bugs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 0. Byte vs Word addressing of adapter RAM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) A design feature; see the .h file (especially the memory map).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 1. Bus Master Data Transfers (original Horizon only, fixed in Ultra)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) The host must not start a transmit direction transfer at a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) non-four-byte boundary in host memory. Instead the host should
  perform a one-byte, a two-byte, or a one-byte followed by a two-byte
  transfer in order to start the rest of the transfer on a four-byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) boundary. RX is OK.
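
  (Illustration only: for a host buffer at address addr, the number of
  leading bytes to shift out by PIO before the bus-master part can
  start on a four-byte boundary would be

  unsigned int lead = (4 - ((unsigned long) addr & 3)) & 3;

  i.e. 0, 1, 2 or 3 bytes.)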
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) Simultaneous transmit and receive direction bus master transfers are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) not allowed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
  The simplest solution to these two problems is always to do PIO (never DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) in the TX direction on the original Horizon. More complicated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) solutions are likely to hurt my brain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 2. Loss of buffer on close VC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) When a VC is being closed, the buffer associated with it is not
  returned to the pool. The host must store a reference to this
  buffer and hand it over to the next VC that is opened.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) The host intervention currently consists of stacking such a buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) pointer at VC close and checking the stack at VC open.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 3. Failure to close a VC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) If a VC is currently receiving a frame then closing the VC may fail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) and the frame continues to be received.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) The solution is to make sure any received frames are flushed when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) ready. This is currently done just before the solution to 2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 4. PCI bus (original Horizon only, fixed in Ultra)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) Reading from the data port prior to initialisation will hang the PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) bus. Just don't do that then! We don't.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) IV To Do List
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) . Timer code may be broken.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) . Allow users to specify buffer allocation split for TX and RX.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) . Deal once and for all with buggy VC close.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) . Handle interrupted and/or non-blocking operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) . Change some macros to functions and move from .h to .c.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) . Try to limit the number of TX frames each VC may have queued, in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) order to reduce the chances of TX buffer exhaustion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) . Implement VBR (bucket and timers not understood) and ABR (need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) do RM cells manually); also no Linux support for either.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) . Implement QoS changes on open VCs (involves extracting parts of VC open
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) and close into separate functions and using them to make changes).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) /********** globals **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) static void do_housekeeping (struct timer_list *t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) static unsigned short debug = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) static unsigned short vpi_bits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) static int max_tx_size = 9000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) static int max_rx_size = 9000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) static unsigned char pci_lat = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) /********** access functions **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) /* Read / Write Horizon registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) static inline void wr_regl (const hrz_dev * dev, unsigned char reg, u32 data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) outl (cpu_to_le32 (data), dev->iobase + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) static inline u32 rd_regl (const hrz_dev * dev, unsigned char reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) return le32_to_cpu (inl (dev->iobase + reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) static inline void wr_regw (const hrz_dev * dev, unsigned char reg, u16 data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) outw (cpu_to_le16 (data), dev->iobase + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) static inline u16 rd_regw (const hrz_dev * dev, unsigned char reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) return le16_to_cpu (inw (dev->iobase + reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) static inline void wrs_regb (const hrz_dev * dev, unsigned char reg, void * addr, u32 len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) outsb (dev->iobase + reg, addr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) static inline void rds_regb (const hrz_dev * dev, unsigned char reg, void * addr, u32 len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) insb (dev->iobase + reg, addr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) /* Read / Write to a given address in Horizon buffer memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) Interrupts must be disabled between the address register and data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) port accesses as these must form an atomic operation. */
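/* Note: the HDW pointers passed to wr_mem and rd_mem are offsets into
   the (effectively zero-based) adapter memory map declared in
   horizon.h, so the pointer arithmetic below just recovers the byte
   address within card memory without casting a pointer to an
   integer. */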
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) static inline void wr_mem (const hrz_dev * dev, HDW * addr, u32 data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) // wr_regl (dev, MEM_WR_ADDR_REG_OFF, (u32) addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) wr_regl (dev, MEM_WR_ADDR_REG_OFF, (addr - (HDW *) 0) * sizeof(HDW));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) wr_regl (dev, MEMORY_PORT_OFF, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) static inline u32 rd_mem (const hrz_dev * dev, HDW * addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) // wr_regl (dev, MEM_RD_ADDR_REG_OFF, (u32) addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) wr_regl (dev, MEM_RD_ADDR_REG_OFF, (addr - (HDW *) 0) * sizeof(HDW));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) return rd_regl (dev, MEMORY_PORT_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) static inline void wr_framer (const hrz_dev * dev, u32 addr, u32 data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) wr_regl (dev, MEM_WR_ADDR_REG_OFF, (u32) addr | 0x80000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) wr_regl (dev, MEMORY_PORT_OFF, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) static inline u32 rd_framer (const hrz_dev * dev, u32 addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) wr_regl (dev, MEM_RD_ADDR_REG_OFF, (u32) addr | 0x80000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) return rd_regl (dev, MEMORY_PORT_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) /********** specialised access functions **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) /* RX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) static inline void FLUSH_RX_CHANNEL (hrz_dev * dev, u16 channel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) wr_regw (dev, RX_CHANNEL_PORT_OFF, FLUSH_CHANNEL | channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) static void WAIT_FLUSH_RX_COMPLETE (hrz_dev * dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) while (rd_regw (dev, RX_CHANNEL_PORT_OFF) & FLUSH_CHANNEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) static inline void SELECT_RX_CHANNEL (hrz_dev * dev, u16 channel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) wr_regw (dev, RX_CHANNEL_PORT_OFF, channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) static void WAIT_UPDATE_COMPLETE (hrz_dev * dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) while (rd_regw (dev, RX_CHANNEL_PORT_OFF) & RX_CHANNEL_UPDATE_IN_PROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) /* TX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) static inline void SELECT_TX_CHANNEL (hrz_dev * dev, u16 tx_channel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) wr_regl (dev, TX_CHANNEL_PORT_OFF, tx_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) /* Update or query one configuration parameter of a particular channel. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) static inline void update_tx_channel_config (hrz_dev * dev, short chan, u8 mode, u16 value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) wr_regw (dev, TX_CHANNEL_CONFIG_COMMAND_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) chan * TX_CHANNEL_CONFIG_MULT | mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) wr_regw (dev, TX_CHANNEL_CONFIG_DATA_OFF, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) /********** dump functions **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) #ifdef DEBUG_HORIZON
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) unsigned char * data = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) PRINTDB (DBG_DATA, "%s(%u) ", prefix, vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) for (i=0; i<skb->len && i < 256;i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) PRINTDM (DBG_DATA, "%02x ", data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) PRINTDE (DBG_DATA,"");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) (void) prefix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) (void) vc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) (void) skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) static inline void dump_regs (hrz_dev * dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) #ifdef DEBUG_HORIZON
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) PRINTD (DBG_REGS, "CONTROL 0: %#x", rd_regl (dev, CONTROL_0_REG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) PRINTD (DBG_REGS, "RX CONFIG: %#x", rd_regw (dev, RX_CONFIG_OFF));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) PRINTD (DBG_REGS, "TX CONFIG: %#x", rd_regw (dev, TX_CONFIG_OFF));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) PRINTD (DBG_REGS, "TX STATUS: %#x", rd_regw (dev, TX_STATUS_OFF));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) PRINTD (DBG_REGS, "IRQ ENBLE: %#x", rd_regl (dev, INT_ENABLE_REG_OFF));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) PRINTD (DBG_REGS, "IRQ SORCE: %#x", rd_regl (dev, INT_SOURCE_REG_OFF));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) (void) dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) static inline void dump_framer (hrz_dev * dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) #ifdef DEBUG_HORIZON
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) PRINTDB (DBG_REGS, "framer registers:");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) for (i = 0; i < 0x10; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) PRINTDM (DBG_REGS, " %02x", rd_framer (dev, i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) PRINTDE (DBG_REGS,"");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) (void) dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) /********** VPI/VCI <-> (RX) channel conversions **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) /* RX channels are 10 bit integers, these fns are quite paranoid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) static inline int vpivci_to_channel (u16 * channel, const short vpi, const int vci) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) unsigned short vci_bits = 10 - vpi_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) if (0 <= vpi && vpi < 1<<vpi_bits && 0 <= vci && vci < 1<<vci_bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) *channel = vpi<<vci_bits | vci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) return *channel ? 0 : -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) }
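
/* For example, with vpi_bits == 2 (and so vci_bits == 8), VPI 1 /
   VCI 42 maps to channel (1 << 8) | 42 == 298; VPI 0 / VCI 0 would
   map to channel 0, which is rejected above (presumably reserved). */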
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) /********** decode RX queue entries **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) static inline u16 rx_q_entry_to_length (u32 x) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) return x & RX_Q_ENTRY_LENGTH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) static inline u16 rx_q_entry_to_rx_channel (u32 x) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) return (x>>RX_Q_ENTRY_CHANNEL_SHIFT) & RX_CHANNEL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) /* Cell Transmit Rate Values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) * the cell transmit rate (cells per sec) can be set to a variety of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) * different values by specifying two parameters: a timer preload from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) * 1 to 16 (stored as 0 to 15) and a clock divider (2 to the power of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) * an exponent from 0 to 14; the special value 15 disables the timer).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) * cellrate = baserate / (preload * 2^divider)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) * The maximum cell rate that can be specified is therefore just the
 * base rate. Doubling the preload is equivalent to adding 1 to the
 * divider, so preload values 1 to 8 are redundant except in the case
 * of a minimal divider (0).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) * Given a desired cell rate, an algorithm to determine the preload
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) * and divider is:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) * a) x = baserate / cellrate, want p * 2^d = x (as far as possible)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) * b) if x > 16 * 2^14 then set p = 16, d = 14 (min rate), done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) * if x <= 16 then set p = x, d = 0 (high rates), done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) * c) now have 16 < x <= 2^18, or 1 < x/16 <= 2^14 and we want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) * know n such that 2^(n-1) < x/16 <= 2^n, so slide a bit until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) * we find the range (n will be between 1 and 14), set d = n
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) * d) Also have 8 < x/2^n <= 16, so set p nearest x/2^n
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) * The algorithm used below is a minor variant of the above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) * The base rate is derived from the oscillator frequency (Hz) using a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) * fixed divider:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) * baserate = freq / 32 in the case of some Unknown Card
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) * baserate = freq / 8 in the case of the Horizon 25
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) * baserate = freq / 8 in the case of the Horizon Ultra 155
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) * The Horizon cards have oscillators and base rates as follows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) * Card Oscillator Base Rate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) * Unknown Card 33 MHz 1.03125 MHz (33 MHz = PCI freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) * Horizon 25 32 MHz 4 MHz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) * Horizon Ultra 155 40 MHz 5 MHz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) * The following defines give the base rates in Hz. These were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) * previously a factor of 100 larger, no doubt someone was using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) * cps*100.
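 *
 * Worked example (illustrative only): on a Horizon Ultra the base
 * rate is 5000000 cells/s, so a request for 60000 cells/s gives
 * x = 5000000 / 60000 = 83.3; x/16 = 5.2 lies between 2^2 and 2^3,
 * so d = 3, and p is the nearest integer to 83.3 / 2^3 = 10.4,
 * i.e. p = 10, giving an actual rate of 5000000 / (10 * 2^3) = 62500
 * cells/s.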
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) #define BR_UKN 1031250l
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) #define BR_HRZ 4000000l
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) #define BR_ULT 5000000l
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) // d is an exponent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) #define CR_MIND 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) #define CR_MAXD 14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) // p ranges from 1 to a power of 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) #define CR_MAXPEXP 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) static int make_rate (const hrz_dev * dev, u32 c, rounding r,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) u16 * bits, unsigned int * actual)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) // note: rounding the rate down means rounding 'p' up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) const unsigned long br = test_bit(ultra, &dev->flags) ? BR_ULT : BR_HRZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) u32 div = CR_MIND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) u32 pre;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) // br_exp and br_man are used to avoid overflowing (c*maxp*2^d) in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) // the tests below. We could think harder about exact possibilities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) // of failure...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) unsigned long br_man = br;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) unsigned int br_exp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) PRINTD (DBG_QOS|DBG_FLOW, "make_rate b=%lu, c=%u, %s", br, c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) r == round_up ? "up" : r == round_down ? "down" : "nearest");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) // avoid div by zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) if (!c) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) PRINTD (DBG_QOS|DBG_ERR, "zero rate is not allowed!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) while (br_exp < CR_MAXPEXP + CR_MIND && (br_man % 2 == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) br_man = br_man >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) ++br_exp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) // (br >>br_exp) <<br_exp == br and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) // br_exp <= CR_MAXPEXP+CR_MIND
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) if (br_man <= (c << (CR_MAXPEXP+CR_MIND-br_exp))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) // Equivalent to: B <= (c << (MAXPEXP+MIND))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) // take care of rounding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) switch (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) case round_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) pre = DIV_ROUND_UP(br, c<<div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) // but p must be non-zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) if (!pre)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) pre = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) case round_nearest:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) pre = DIV_ROUND_CLOSEST(br, c<<div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) // but p must be non-zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) if (!pre)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) pre = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) default: /* round_up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) pre = br/(c<<div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) // but p must be non-zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) if (!pre)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) PRINTD (DBG_QOS, "A: p=%u, d=%u", pre, div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) goto got_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) // at this point we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) // d == MIND and (c << (MAXPEXP+MIND)) < B
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) while (div < CR_MAXD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) div++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) if (br_man <= (c << (CR_MAXPEXP+div-br_exp))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) // Equivalent to: B <= (c << (MAXPEXP+d))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) // c << (MAXPEXP+d-1) < B <= c << (MAXPEXP+d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) // 1 << (MAXPEXP-1) < B/2^d/c <= 1 << MAXPEXP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) // MAXP/2 < B/c2^d <= MAXP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) // take care of rounding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) switch (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) case round_down:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) pre = DIV_ROUND_UP(br, c<<div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) case round_nearest:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) pre = DIV_ROUND_CLOSEST(br, c<<div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) default: /* round_up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) pre = br/(c<<div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) PRINTD (DBG_QOS, "B: p=%u, d=%u", pre, div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) goto got_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) // at this point we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) // d == MAXD and (c << (MAXPEXP+MAXD)) < B
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) // but we cannot go any higher
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) // take care of rounding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) if (r == round_down)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) pre = 1 << CR_MAXPEXP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) PRINTD (DBG_QOS, "C: p=%u, d=%u", pre, div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) got_it:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) // paranoia
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) if (div > CR_MAXD || (!pre) || pre > 1<<CR_MAXPEXP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) PRINTD (DBG_QOS, "set_cr internal failure: d=%u p=%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) div, pre);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) if (bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) *bits = (div<<CLOCK_SELECT_SHIFT) | (pre-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) if (actual) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) *actual = DIV_ROUND_UP(br, pre<<div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) PRINTD (DBG_QOS, "actual rate: %u", *actual);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) static int make_rate_with_tolerance (const hrz_dev * dev, u32 c, rounding r, unsigned int tol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) u16 * bit_pattern, unsigned int * actual) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) unsigned int my_actual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) PRINTD (DBG_QOS|DBG_FLOW, "make_rate_with_tolerance c=%u, %s, tol=%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) c, (r == round_up) ? "up" : (r == round_down) ? "down" : "nearest", tol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) if (!actual)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) // actual rate is not returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) actual = &my_actual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) if (make_rate (dev, c, round_nearest, bit_pattern, actual))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) // should never happen as round_nearest always succeeds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) if (c - tol <= *actual && *actual <= c + tol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) // within tolerance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) // intolerant, try rounding instead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) return make_rate (dev, c, r, bit_pattern, actual);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) /********** Listen on a VC **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) static int hrz_open_rx (hrz_dev * dev, u16 channel) {
  // is there any guarantee that we don't get two simultaneous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) // identical calls of this function from different processes? yes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) // rate_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) u32 channel_type; // u16?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) u16 buf_ptr = RX_CHANNEL_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) rx_ch_desc * rx_desc = &memmap->rx_descs[channel];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) PRINTD (DBG_FLOW, "hrz_open_rx %x", channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) spin_lock_irqsave (&dev->mem_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) channel_type = rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) spin_unlock_irqrestore (&dev->mem_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) // very serious error, should never occur
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) if (channel_type != RX_CHANNEL_DISABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) PRINTD (DBG_ERR|DBG_VCC, "RX channel for VC already open");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) return -EBUSY; // clean up?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) // Give back spare buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) if (dev->noof_spare_buffers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) buf_ptr = dev->spare_buffers[--dev->noof_spare_buffers];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) PRINTD (DBG_VCC, "using a spare buffer: %u", buf_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) // should never occur
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) if (buf_ptr == RX_CHANNEL_DISABLED || buf_ptr == RX_CHANNEL_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) // but easy to recover from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) PRINTD (DBG_ERR|DBG_VCC, "bad spare buffer pointer, using IDLE");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) buf_ptr = RX_CHANNEL_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) PRINTD (DBG_VCC, "using IDLE buffer pointer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) // Channel is currently disabled so change its status to idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) // do we really need to save the flags again?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) spin_lock_irqsave (&dev->mem_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) wr_mem (dev, &rx_desc->wr_buf_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) buf_ptr | CHANNEL_TYPE_AAL5 | FIRST_CELL_OF_AAL5_FRAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) if (buf_ptr != RX_CHANNEL_IDLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) wr_mem (dev, &rx_desc->rd_buf_type, buf_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) spin_unlock_irqrestore (&dev->mem_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) // rxer->rate = make_rate (qos->peak_cells);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) PRINTD (DBG_FLOW, "hrz_open_rx ok");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) /********** change vc rate for a given vc **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) static void hrz_change_vc_qos (ATM_RXER * rxer, MAAL_QOS * qos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) rxer->rate = make_rate (qos->peak_cells);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) /********** free an skb (as per ATM device driver documentation) **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) static void hrz_kfree_skb (struct sk_buff * skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) if (ATM_SKB(skb)->vcc->pop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) dev_kfree_skb_any (skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) /********** cancel listen on a VC **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) static void hrz_close_rx (hrz_dev * dev, u16 vc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) u32 r1, r2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) rx_ch_desc * rx_desc = &memmap->rx_descs[vc];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) int was_idle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) spin_lock_irqsave (&dev->mem_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) value = rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) spin_unlock_irqrestore (&dev->mem_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) if (value == RX_CHANNEL_DISABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) // I suppose this could happen once we deal with _NONE traffic properly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) PRINTD (DBG_VCC, "closing VC: RX channel %u already disabled", vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (value == RX_CHANNEL_IDLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) was_idle = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) spin_lock_irqsave (&dev->mem_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) wr_mem (dev, &rx_desc->wr_buf_type, RX_CHANNEL_DISABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) if ((rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK) == RX_CHANNEL_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) was_idle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (was_idle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) spin_unlock_irqrestore (&dev->mem_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) WAIT_FLUSH_RX_COMPLETE(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) // XXX Is this all really necessary? We can rely on the rx_data_av
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) // handler to discard frames that remain queued for delivery. If the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) // worry is that immediately reopening the channel (perhaps by a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) // different process) may cause some data to be mis-delivered then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) // there may still be a simpler solution (such as busy-waiting on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) // rx_busy once the channel is disabled or before a new one is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) // opened - does this leave any holes?). Arguably setting up and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) // tearing down the TX and RX halves of each virtual circuit could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) // most safely be done within ?x_busy protected regions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
  // OK, current changes are that Simon's marker is disabled and we DO
  // look for NULL rxer elsewhere. The code here seems to flush frames
  // and then remember the last dead cell belonging to the channel
  // just disabled - the cell gets relinked at the next vc_open.
  // However, when all VCs are closed, or only a few are open, a
  // handful of buffers remain unusable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) // Does anyone feel like documenting spare_buffers properly?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) // Does anyone feel like fixing this in a nicer way?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) // Flush any data which is left in the channel
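  // The loop below alternates between selecting a different channel
  // (which forces the hardware to write back the pointers of the
  // channel being closed) and flushing the channel itself. The
  // read-buffer pointer is sampled before and after each flush; once
  // the two samples agree no more frames are queued, and the final
  // buffer pointer is stashed in spare_buffers for reuse by
  // hrz_open_rx.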
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) for (;;) {
    // Select an RX channel other than the one we are trying to close,
    // to force Horizon to flush the read and write pointers of the
    // channel being closed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) u16 other = vc^(RX_CHANS/2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) SELECT_RX_CHANNEL (dev, other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) WAIT_UPDATE_COMPLETE (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) r1 = rd_mem (dev, &rx_desc->rd_buf_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
    // Select this RX channel. Flush doesn't seem to work unless we
    // select an RX channel beforehand.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) SELECT_RX_CHANNEL (dev, vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) WAIT_UPDATE_COMPLETE (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) // Attempt to flush a frame on this RX channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) FLUSH_RX_CHANNEL (dev, vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) WAIT_FLUSH_RX_COMPLETE (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) // Force Horizon to flush rx channel read and write pointers as before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) SELECT_RX_CHANNEL (dev, other);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) WAIT_UPDATE_COMPLETE (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) r2 = rd_mem (dev, &rx_desc->rd_buf_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) PRINTD (DBG_VCC|DBG_RX, "r1 = %u, r2 = %u", r1, r2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (r1 == r2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) dev->spare_buffers[dev->noof_spare_buffers++] = (u16)r1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) rx_q_entry * wr_ptr = &memmap->rx_q_entries[rd_regw (dev, RX_QUEUE_WR_PTR_OFF)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) rx_q_entry * rd_ptr = dev->rx_q_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
    PRINTD (DBG_VCC|DBG_RX, "rd_ptr = %p, wr_ptr = %p", rd_ptr, wr_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) while (rd_ptr != wr_ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) u32 x = rd_mem (dev, (HDW *) rd_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (vc == rx_q_entry_to_rx_channel (x)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) x |= SIMONS_DODGEY_MARKER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) PRINTD (DBG_RX|DBG_VCC|DBG_WARN, "marking a frame as dodgey");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) wr_mem (dev, (HDW *) rd_ptr, x);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (rd_ptr == dev->rx_q_wrap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) rd_ptr = dev->rx_q_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) rd_ptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) spin_unlock_irqrestore (&dev->mem_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) /********** schedule RX transfers **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
// Note on tail recursion: a GCC developer said that it is not likely
// to be fixed soon, so do not define TAILRECURSIONWORKS unless you
// are sure it does work, as you may otherwise overflow the kernel stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) // giving this fn a return value would help GCC, allegedly
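// When TAILRECURSIONWORKS is not defined, the tail call at the bottom
// of rx_schedule and tx_schedule is emulated by a while (pio_instead)
// loop: another pass is made only if a further PIO transfer was
// scheduled during the current one.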
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) static void rx_schedule (hrz_dev * dev, int irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) unsigned int rx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) int pio_instead = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) #ifndef TAILRECURSIONWORKS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) pio_instead = 1;
  while (pio_instead) {
    pio_instead = 0; // cleared each pass; set again below if another PIO transfer is scheduled
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) // bytes waiting for RX transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) rx_bytes = dev->rx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) spin_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) while (rd_regl (dev, MASTER_RX_COUNT_REG_OFF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) PRINTD (DBG_RX|DBG_WARN, "RX error: other PCI Bus Master RX still in progress!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (++spin_count > 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) PRINTD (DBG_RX|DBG_ERR, "spun out waiting PCI Bus Master RX completion");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) clear_bit (rx_busy, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) hrz_kfree_skb (dev->rx_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) // this code follows the TX code but (at the moment) there is only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) // one region - the skb itself. I don't know if this will change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) // but it doesn't hurt to have the code here, disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (rx_bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) // start next transfer within same region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (rx_bytes <= MAX_PIO_COUNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) PRINTD (DBG_RX|DBG_BUS, "(pio)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) pio_instead = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (rx_bytes <= MAX_TRANSFER_COUNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) PRINTD (DBG_RX|DBG_BUS, "(simple or last multi)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) dev->rx_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) PRINTD (DBG_RX|DBG_BUS, "(continuing multi)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) dev->rx_bytes = rx_bytes - MAX_TRANSFER_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) rx_bytes = MAX_TRANSFER_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) // rx_bytes == 0 -- we're between regions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) // regions remaining to transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) unsigned int rx_regions = dev->rx_regions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) unsigned int rx_regions = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (rx_regions) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) // start a new region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) dev->rx_addr = dev->rx_iovec->iov_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) rx_bytes = dev->rx_iovec->iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) ++dev->rx_iovec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) dev->rx_regions = rx_regions - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (rx_bytes <= MAX_PIO_COUNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) PRINTD (DBG_RX|DBG_BUS, "(pio)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) pio_instead = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (rx_bytes <= MAX_TRANSFER_COUNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) PRINTD (DBG_RX|DBG_BUS, "(full region)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) dev->rx_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) PRINTD (DBG_RX|DBG_BUS, "(start multi region)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) dev->rx_bytes = rx_bytes - MAX_TRANSFER_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) rx_bytes = MAX_TRANSFER_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) // rx_regions == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) // that's all folks - end of frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) struct sk_buff * skb = dev->rx_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) // dev->rx_iovec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) FLUSH_RX_CHANNEL (dev, dev->rx_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) dump_skb ("<<<", dev->rx_channel, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) PRINTD (DBG_RX|DBG_SKB, "push %p %u", skb->data, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) // VC layer stats
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) atomic_inc(&vcc->stats->rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) __net_timestamp(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) // end of our responsibility
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) vcc->push (vcc, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) // note: writing RX_COUNT clears any interrupt condition
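  // Two ways to move the data: if we have decided on PIO, read the
  // bytes straight from the data port; otherwise program the bus
  // master address and count registers and let the ensuing
  // RX_BUS_MASTER_COMPLETE interrupt drive the next step.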
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (rx_bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (pio_instead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) if (irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) rds_regb (dev, DATA_PORT_OFF, dev->rx_addr, rx_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) wr_regl (dev, MASTER_RX_ADDR_REG_OFF, virt_to_bus (dev->rx_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) wr_regl (dev, MASTER_RX_COUNT_REG_OFF, rx_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) dev->rx_addr += rx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) // allow another RX thread to start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) YELLOW_LED_ON(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) clear_bit (rx_busy, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) PRINTD (DBG_RX, "cleared rx_busy for dev %p", dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) #ifdef TAILRECURSIONWORKS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) // and we all bless optimised tail calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (pio_instead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) return rx_schedule (dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) // grrrrrrr!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) irq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) /********** handle RX bus master complete events **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) static void rx_bus_master_complete_handler (hrz_dev * dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (test_bit (rx_busy, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) rx_schedule (dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) PRINTD (DBG_RX|DBG_ERR, "unexpected RX bus master completion");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) // clear interrupt condition on adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) /********** (queue to) become the next TX thread **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) static int tx_hold (hrz_dev * dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) PRINTD (DBG_TX, "sleeping at tx lock %p %lu", dev, dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) wait_event_interruptible(dev->tx_queue, (!test_and_set_bit(tx_busy, &dev->flags)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) PRINTD (DBG_TX, "woken at tx lock %p %lu", dev, dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (signal_pending (current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) PRINTD (DBG_TX, "set tx_busy for dev %p", dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) /********** allow another TX thread to start **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) static inline void tx_release (hrz_dev * dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) clear_bit (tx_busy, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) PRINTD (DBG_TX, "cleared tx_busy for dev %p", dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) wake_up_interruptible (&dev->tx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) /********** schedule TX transfers **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) static void tx_schedule (hrz_dev * const dev, int irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) unsigned int tx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) int append_desc = 0;
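  // append_desc is set when the last bytes of the last region are
  // about to go out: the PIO path then writes the frame length to the
  // TX descriptor port itself, while the bus-master path loads the
  // descriptor register and sets MASTER_TX_AUTO_APPEND_DESC in the
  // transfer count so the adapter appends the descriptor for us.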
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) int pio_instead = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) #ifndef TAILRECURSIONWORKS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) pio_instead = 1;
  while (pio_instead) {
    pio_instead = 0; // cleared each pass; set again below if another PIO transfer is scheduled
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) // bytes in current region waiting for TX transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) tx_bytes = dev->tx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) spin_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) while (rd_regl (dev, MASTER_TX_COUNT_REG_OFF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) PRINTD (DBG_TX|DBG_WARN, "TX error: other PCI Bus Master TX still in progress!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) if (++spin_count > 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) PRINTD (DBG_TX|DBG_ERR, "spun out waiting PCI Bus Master TX completion");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) tx_release (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) hrz_kfree_skb (dev->tx_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) if (tx_bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) // start next transfer within same region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) if (!test_bit (ultra, &dev->flags) || tx_bytes <= MAX_PIO_COUNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) PRINTD (DBG_TX|DBG_BUS, "(pio)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) pio_instead = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (tx_bytes <= MAX_TRANSFER_COUNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) PRINTD (DBG_TX|DBG_BUS, "(simple or last multi)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (!dev->tx_iovec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) // end of last region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) append_desc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) dev->tx_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) PRINTD (DBG_TX|DBG_BUS, "(continuing multi)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) dev->tx_bytes = tx_bytes - MAX_TRANSFER_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) tx_bytes = MAX_TRANSFER_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) // tx_bytes == 0 -- we're between regions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) // regions remaining to transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) unsigned int tx_regions = dev->tx_regions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (tx_regions) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) // start a new region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) dev->tx_addr = dev->tx_iovec->iov_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) tx_bytes = dev->tx_iovec->iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) ++dev->tx_iovec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) dev->tx_regions = tx_regions - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (!test_bit (ultra, &dev->flags) || tx_bytes <= MAX_PIO_COUNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) PRINTD (DBG_TX|DBG_BUS, "(pio)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) pio_instead = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (tx_bytes <= MAX_TRANSFER_COUNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) PRINTD (DBG_TX|DBG_BUS, "(full region)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) dev->tx_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) PRINTD (DBG_TX|DBG_BUS, "(start multi region)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) dev->tx_bytes = tx_bytes - MAX_TRANSFER_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) tx_bytes = MAX_TRANSFER_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) // tx_regions == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) // that's all folks - end of frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) struct sk_buff * skb = dev->tx_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) dev->tx_iovec = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) // VC layer stats
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) // free the skb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) hrz_kfree_skb (skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) // note: writing TX_COUNT clears any interrupt condition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) if (tx_bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if (pio_instead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) if (irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) wrs_regb (dev, DATA_PORT_OFF, dev->tx_addr, tx_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (append_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) wr_regl (dev, TX_DESCRIPTOR_PORT_OFF, cpu_to_be32 (dev->tx_skb->len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) wr_regl (dev, MASTER_TX_ADDR_REG_OFF, virt_to_bus (dev->tx_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (append_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) wr_regl (dev, TX_DESCRIPTOR_REG_OFF, cpu_to_be32 (dev->tx_skb->len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) wr_regl (dev, MASTER_TX_COUNT_REG_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) append_desc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) ? tx_bytes | MASTER_TX_AUTO_APPEND_DESC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) : tx_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) dev->tx_addr += tx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) YELLOW_LED_ON(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) tx_release (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) #ifdef TAILRECURSIONWORKS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) // and we all bless optimised tail calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (pio_instead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) return tx_schedule (dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) // grrrrrrr!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) irq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) /********** handle TX bus master complete events **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) static void tx_bus_master_complete_handler (hrz_dev * dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (test_bit (tx_busy, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) tx_schedule (dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) PRINTD (DBG_TX|DBG_ERR, "unexpected TX bus master completion");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) // clear interrupt condition on adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) /********** move RX Q pointer to next item in circular buffer **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) // called only from IRQ sub-handler
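// The RX queue is a circular buffer of entries in adapter memory:
// rx_q_entry is our read pointer, rx_q_wrap the last entry and
// rx_q_reset the first. After reading an entry we advance (wrapping
// if necessary) and report the new index to the adapter by writing it
// to the RX_QUEUE_RD_PTR register.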
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) static u32 rx_queue_entry_next (hrz_dev * dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) u32 rx_queue_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) spin_lock (&dev->mem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) rx_queue_entry = rd_mem (dev, &dev->rx_q_entry->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (dev->rx_q_entry == dev->rx_q_wrap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) dev->rx_q_entry = dev->rx_q_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) dev->rx_q_entry++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) wr_regw (dev, RX_QUEUE_RD_PTR_OFF, dev->rx_q_entry - dev->rx_q_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) spin_unlock (&dev->mem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) return rx_queue_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) /********** handle RX data received by device **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) // called from IRQ handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) static void rx_data_av_handler (hrz_dev * dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) u32 rx_queue_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) u32 rx_queue_entry_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) u16 rx_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) u16 rx_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) PRINTD (DBG_FLOW, "hrz_data_av_handler");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) // try to grab rx lock (not possible during RX bus mastering)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) if (test_and_set_bit (rx_busy, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) PRINTD (DBG_RX, "locked out of rx lock");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) PRINTD (DBG_RX, "set rx_busy for dev %p", dev);
  // lock is cleared if we fail now, otherwise after bus master completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) YELLOW_LED_OFF(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) rx_queue_entry = rx_queue_entry_next (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) rx_len = rx_q_entry_to_length (rx_queue_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) rx_channel = rx_q_entry_to_rx_channel (rx_queue_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) WAIT_FLUSH_RX_COMPLETE (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) SELECT_RX_CHANNEL (dev, rx_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) PRINTD (DBG_RX, "rx_queue_entry is: %#x", rx_queue_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) rx_queue_entry_flags = rx_queue_entry & (RX_CRC_32_OK|RX_COMPLETE_FRAME|SIMONS_DODGEY_MARKER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) if (!rx_len) {
    // (at least) bus-mastering breaks if we try to handle a
    // zero-length frame; besides, AAL5 does not support them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) PRINTK (KERN_ERR, "zero-length frame!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) rx_queue_entry_flags &= ~RX_COMPLETE_FRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) if (rx_queue_entry_flags & SIMONS_DODGEY_MARKER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) PRINTD (DBG_RX|DBG_ERR, "Simon's marker detected!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (rx_queue_entry_flags == (RX_CRC_32_OK | RX_COMPLETE_FRAME)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) struct atm_vcc * atm_vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) PRINTD (DBG_RX, "got a frame on rx_channel %x len %u", rx_channel, rx_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) atm_vcc = dev->rxer[rx_channel];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) // if no vcc is assigned to this channel, we should drop the frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) // (is this what SIMONS etc. was trying to achieve?)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (atm_vcc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if (rx_len <= atm_vcc->qos.rxtp.max_sdu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) struct sk_buff * skb = atm_alloc_charge (atm_vcc, rx_len, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) // remember this so we can push it later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) dev->rx_skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) // remember this so we can flush it later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) dev->rx_channel = rx_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) // prepare socket buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) skb_put (skb, rx_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) ATM_SKB(skb)->vcc = atm_vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) // simple transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) // dev->rx_regions = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) // dev->rx_iovec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) dev->rx_bytes = rx_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) dev->rx_addr = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) PRINTD (DBG_RX, "RX start simple transfer (addr %p, len %d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) skb->data, rx_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) // do the business
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) rx_schedule (dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) PRINTD (DBG_SKB|DBG_WARN, "failed to get skb");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) PRINTK (KERN_INFO, "frame received on TX-only VC %x", rx_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) // do we count this?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) PRINTK (KERN_WARNING, "dropped over-size frame");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) // do we count this?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) PRINTD (DBG_WARN|DBG_VCC|DBG_RX, "no VCC for this frame (VC closed)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) // do we count this?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) // Wait update complete ? SPONG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) // RX was aborted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) YELLOW_LED_ON(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) FLUSH_RX_CHANNEL (dev,rx_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) clear_bit (rx_busy, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) /********** interrupt handler **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) static irqreturn_t interrupt_handler(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) hrz_dev *dev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) u32 int_source;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) unsigned int irq_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) PRINTD (DBG_FLOW, "interrupt_handler: %p", dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) // definitely for us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) irq_ok = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) while ((int_source = rd_regl (dev, INT_SOURCE_REG_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) & INTERESTING_INTERRUPTS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) // In the interests of fairness, the handlers below are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) // called in sequence and without immediate return to the head of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) // the while loop. This is only of issue for slow hosts (or when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) // debugging messages are on). Really slow hosts may find a fast
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) // sender keeps them permanently in the IRQ handler. :(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) // (only an issue for slow hosts) RX completion goes before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) // rx_data_av as the former implies rx_busy and so the latter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) // would just abort. If it reschedules another transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) // (continuing the same frame) then it will not clear rx_busy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) // (only an issue for slow hosts) TX completion goes before RX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) // data available as it is a much shorter routine - there is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) // chance that any further transfers it schedules will be complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) // by the time of the return to the head of the while loop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) if (int_source & RX_BUS_MASTER_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) ++irq_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) PRINTD (DBG_IRQ|DBG_BUS|DBG_RX, "rx_bus_master_complete asserted");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) rx_bus_master_complete_handler (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) if (int_source & TX_BUS_MASTER_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) ++irq_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) PRINTD (DBG_IRQ|DBG_BUS|DBG_TX, "tx_bus_master_complete asserted");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) tx_bus_master_complete_handler (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (int_source & RX_DATA_AV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) ++irq_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) PRINTD (DBG_IRQ|DBG_RX, "rx_data_av asserted");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) rx_data_av_handler (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) if (irq_ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) PRINTD (DBG_IRQ, "work done: %u", irq_ok);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) PRINTD (DBG_IRQ|DBG_WARN, "spurious interrupt source: %#x", int_source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler done: %p", dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) if (irq_ok)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) /********** housekeeping **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) static void do_housekeeping (struct timer_list *t) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) // just stats at the moment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) hrz_dev * dev = from_timer(dev, t, housekeeping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) // collect device-specific (not driver/atm-linux) stats here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) dev->tx_cell_count += rd_regw (dev, TX_CELL_COUNT_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) dev->rx_cell_count += rd_regw (dev, RX_CELL_COUNT_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) dev->hec_error_count += rd_regw (dev, HEC_ERROR_COUNT_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) dev->unassigned_cell_count += rd_regw (dev, UNASSIGNED_CELL_COUNT_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) mod_timer (&dev->housekeeping, jiffies + HZ/10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) /********** find an idle channel for TX and set it up **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) // called with tx_busy set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) static short setup_idle_tx_channel (hrz_dev * dev, hrz_vcc * vcc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) unsigned short idle_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) short tx_channel = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) unsigned int spin_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) PRINTD (DBG_FLOW|DBG_TX, "setup_idle_tx_channel %p", dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
  // it would be better to fail immediately; the caller could then decide
  // whether to wait or drop (depending on whether this is UBR etc.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) spin_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) while (!(idle_channels = rd_regw (dev, TX_STATUS_OFF) & IDLE_CHANNELS_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) PRINTD (DBG_TX|DBG_WARN, "waiting for idle TX channel");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) // delay a bit here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (++spin_count > 100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) PRINTD (DBG_TX|DBG_ERR, "spun out waiting for idle TX channel");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) // got an idle channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) {
    // tx_idle ensures we look for idle channels in round-robin (RR) order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) int chan = dev->tx_idle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) int keep_going = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) while (keep_going) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) if (idle_channels & (1<<chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) tx_channel = chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) keep_going = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) ++chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) if (chan == TX_CHANS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) chan = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) dev->tx_idle = chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) // set up the channel we found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) // Initialise the cell header in the transmit channel descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) // a.k.a. prepare the channel and remember that we have done so.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) tx_ch_desc * tx_desc = &memmap->tx_descs[tx_channel];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) u32 rd_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) u32 wr_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) u16 channel = vcc->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) spin_lock_irqsave (&dev->mem_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) // Update the transmit channel record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) dev->tx_channel_record[tx_channel] = channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) // xBR channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) update_tx_channel_config (dev, tx_channel, RATE_TYPE_ACCESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) vcc->tx_xbr_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) // Update the PCR counter preload value etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) update_tx_channel_config (dev, tx_channel, PCR_TIMER_ACCESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) vcc->tx_pcr_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) if (vcc->tx_xbr_bits == VBR_RATE_TYPE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) // SCR timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) update_tx_channel_config (dev, tx_channel, SCR_TIMER_ACCESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) vcc->tx_scr_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) // Bucket size...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) update_tx_channel_config (dev, tx_channel, BUCKET_CAPACITY_ACCESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) vcc->tx_bucket_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) // ... and fullness
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) update_tx_channel_config (dev, tx_channel, BUCKET_FULLNESS_ACCESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) vcc->tx_bucket_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) // Initialise the read and write buffer pointers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) rd_ptr = rd_mem (dev, &tx_desc->rd_buf_type) & BUFFER_PTR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) wr_ptr = rd_mem (dev, &tx_desc->wr_buf_type) & BUFFER_PTR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) // idle TX channels should have identical pointers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) if (rd_ptr != wr_ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) PRINTD (DBG_TX|DBG_ERR, "TX buffer pointers are broken!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) // spin_unlock... return -E...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) // I wonder if gcc would get rid of one of the pointer aliases
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) PRINTD (DBG_TX, "TX buffer pointers are: rd %x, wr %x.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) rd_ptr, wr_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) switch (vcc->aal) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) case aal0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) PRINTD (DBG_QOS|DBG_TX, "tx_channel: aal0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) rd_ptr |= CHANNEL_TYPE_RAW_CELLS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) wr_ptr |= CHANNEL_TYPE_RAW_CELLS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) case aal34:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) PRINTD (DBG_QOS|DBG_TX, "tx_channel: aal34");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) rd_ptr |= CHANNEL_TYPE_AAL3_4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) wr_ptr |= CHANNEL_TYPE_AAL3_4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) case aal5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) rd_ptr |= CHANNEL_TYPE_AAL5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) wr_ptr |= CHANNEL_TYPE_AAL5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) // Initialise the CRC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) wr_mem (dev, &tx_desc->partial_crc, INITIAL_CRC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) wr_mem (dev, &tx_desc->rd_buf_type, rd_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) wr_mem (dev, &tx_desc->wr_buf_type, wr_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) // Write the Cell Header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) // Payload Type, CLP and GFC would go here if non-zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) wr_mem (dev, &tx_desc->cell_header, channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) spin_unlock_irqrestore (&dev->mem_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) return tx_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) /********** send a frame **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) static int hrz_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) unsigned int spin_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) int free_buffers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) hrz_vcc * vcc = HRZ_VCC(atm_vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) u16 channel = vcc->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) u32 buffers_required;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) /* signed for error return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) short tx_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) PRINTD (DBG_FLOW|DBG_TX, "hrz_send vc %x data %p len %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) channel, skb->data, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) dump_skb (">>>", channel, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (atm_vcc->qos.txtp.traffic_class == ATM_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) PRINTK (KERN_ERR, "attempt to send on RX-only VC %x", channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) hrz_kfree_skb (skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) // don't understand this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) ATM_SKB(skb)->vcc = atm_vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) if (skb->len > atm_vcc->qos.txtp.max_sdu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) PRINTK (KERN_ERR, "sk_buff length greater than agreed max_sdu, dropping...");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) hrz_kfree_skb (skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) if (!channel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) PRINTD (DBG_ERR|DBG_TX, "attempt to transmit on zero (rx_)channel");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) hrz_kfree_skb (skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) // where would be a better place for this? housekeeping?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) u16 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) pci_read_config_word (dev->pci_dev, PCI_STATUS, &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) if (status & PCI_STATUS_REC_MASTER_ABORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) PRINTD (DBG_BUS|DBG_ERR, "Clearing PCI Master Abort (and cleaning up)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) status &= ~PCI_STATUS_REC_MASTER_ABORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) pci_write_config_word (dev->pci_dev, PCI_STATUS, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (test_bit (tx_busy, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) hrz_kfree_skb (dev->tx_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) tx_release (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) #ifdef DEBUG_HORIZON
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) /* wey-hey! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) if (channel == 1023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) unsigned short d = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) char * s = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) if (*s++ == 'D') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) for (i = 0; i < 4; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) d = (d << 4) | hex_to_bin(*s++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) PRINTK (KERN_INFO, "debug bitmap is now %hx", debug = d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) // wait until TX is free and grab lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) if (tx_hold (dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) hrz_kfree_skb (skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) // Wait for enough space to be available in transmit buffer memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) // should be number of cells needed + 2 (according to hardware docs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) // = ((framelen+8)+47) / 48 + 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) // = (framelen+7) / 48 + 3, hmm... faster to put addition inside XXX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) buffers_required = (skb->len+(ATM_AAL5_TRAILER-1)) / ATM_CELL_PAYLOAD + 3;
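// worked example (illustration only): an AAL5 frame of 1000 bytes plus
// the 8 byte trailer occupies ceil(1008/48) = 21 cells, so the hardware
// wants 21 + 2 = 23 buffers; the expression above gives
// (1000+7)/48 + 3 = 20 + 3 = 23, as expected.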
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) // replace with timer and sleep, add dev->tx_buffers_queue (max 1 entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) spin_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) while ((free_buffers = rd_regw (dev, TX_FREE_BUFFER_COUNT_OFF)) < buffers_required) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) PRINTD (DBG_TX, "waiting for free TX buffers, got %d of %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) free_buffers, buffers_required);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) // what is the appropriate delay? implement a timeout? (depending on line speed?)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) // mdelay (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) // what happens if we kill (current_pid, SIGKILL) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) if (++spin_count > 1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) PRINTD (DBG_TX|DBG_ERR, "spun out waiting for tx buffers, got %d of %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) free_buffers, buffers_required);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) tx_release (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) hrz_kfree_skb (skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) // Select a channel to transmit the frame on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) if (channel == dev->last_vc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) PRINTD (DBG_TX, "last vc hack: hit");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) tx_channel = dev->tx_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) PRINTD (DBG_TX, "last vc hack: miss");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) // Are we currently transmitting this VC on one of the channels?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) for (tx_channel = 0; tx_channel < TX_CHANS; ++tx_channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) if (dev->tx_channel_record[tx_channel] == channel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) PRINTD (DBG_TX, "vc already on channel: hit");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) if (tx_channel == TX_CHANS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) PRINTD (DBG_TX, "vc already on channel: miss");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) // Find and set up an idle channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) tx_channel = setup_idle_tx_channel (dev, vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) if (tx_channel < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) PRINTD (DBG_TX|DBG_ERR, "failed to get channel");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) tx_release (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) return tx_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) PRINTD (DBG_TX, "got channel");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) SELECT_TX_CHANNEL(dev, tx_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) dev->last_vc = channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) dev->tx_last = tx_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) PRINTD (DBG_TX, "using channel %u", tx_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) YELLOW_LED_OFF(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) // TX start transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) unsigned int tx_len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) unsigned int tx_iovcnt = skb_shinfo(skb)->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) // remember this so we can free it later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) dev->tx_skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) if (tx_iovcnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) // scatter gather transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) dev->tx_regions = tx_iovcnt;
dev->tx_iovec = NULL; /* @@@ needs to be rewritten */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) dev->tx_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) PRINTD (DBG_TX|DBG_BUS, "TX start scatter-gather transfer (iovec %p, len %d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) skb->data, tx_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) tx_release (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) hrz_kfree_skb (skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) // simple transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) dev->tx_regions = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) dev->tx_iovec = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) dev->tx_bytes = tx_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) dev->tx_addr = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) PRINTD (DBG_TX|DBG_BUS, "TX start simple transfer (addr %p, len %d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) skb->data, tx_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) // and do the business
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) tx_schedule (dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) /********** reset a card **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) static void hrz_reset (const hrz_dev * dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) u32 control_0_reg = rd_regl (dev, CONTROL_0_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) // why not set RESET_HORIZON to one and wait for the card to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) // reassert that bit as zero? Like so:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) control_0_reg = control_0_reg & RESET_HORIZON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) wr_regl (dev, CONTROL_0_REG, control_0_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) while (control_0_reg & RESET_HORIZON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) control_0_reg = rd_regl (dev, CONTROL_0_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) // old reset code retained:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) wr_regl (dev, CONTROL_0_REG, control_0_reg |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) RESET_ATM | RESET_RX | RESET_TX | RESET_HOST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) // just guessing here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) udelay (1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) wr_regl (dev, CONTROL_0_REG, control_0_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) /********** read the burnt in address **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) static void WRITE_IT_WAIT (const hrz_dev *dev, u32 ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) wr_regl (dev, CONTROL_0_REG, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) udelay (5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) static void CLOCK_IT (const hrz_dev *dev, u32 ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) // DI must be valid around rising SK edge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) WRITE_IT_WAIT(dev, ctrl & ~SEEPROM_SK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) WRITE_IT_WAIT(dev, ctrl | SEEPROM_SK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) static u16 read_bia(const hrz_dev *dev, u16 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) u32 ctrl = rd_regl (dev, CONTROL_0_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) const unsigned int addr_bits = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) const unsigned int data_bits = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) u16 res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) ctrl &= ~(SEEPROM_CS | SEEPROM_SK | SEEPROM_DI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) WRITE_IT_WAIT(dev, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) // wake Serial EEPROM and send 110 (READ) command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) ctrl |= (SEEPROM_CS | SEEPROM_DI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) CLOCK_IT(dev, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) ctrl |= SEEPROM_DI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) CLOCK_IT(dev, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) ctrl &= ~SEEPROM_DI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) CLOCK_IT(dev, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) for (i=0; i<addr_bits; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) if (addr & (1 << (addr_bits-1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) ctrl |= SEEPROM_DI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) ctrl &= ~SEEPROM_DI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) CLOCK_IT(dev, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) addr = addr << 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
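// worked example (illustration only): addr = 2 (000010 in 6 bits) is
// clocked out MSB first by the loop above as 0, 0, 0, 0, 1, 0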
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) // we could check that we have DO = 0 here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) ctrl &= ~SEEPROM_DI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) for (i=0;i<data_bits;i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) res = res >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) CLOCK_IT(dev, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) if (rd_regl (dev, CONTROL_0_REG) & SEEPROM_DO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) res |= (1 << (data_bits-1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) ctrl &= ~(SEEPROM_SK | SEEPROM_CS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) WRITE_IT_WAIT(dev, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) /********** initialise a card **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) static int hrz_init(hrz_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) int onefivefive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) u16 chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) int buff_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) HDW * mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) cell_buf * tx_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) cell_buf * rx_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) u32 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) ctrl = rd_regl (dev, CONTROL_0_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) PRINTD (DBG_INFO, "ctrl0reg is %#x", ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) onefivefive = ctrl & ATM_LAYER_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) if (onefivefive)
printk (DEV_LABEL ": Horizon Ultra (at 155.52 Mb/s)");
else
printk (DEV_LABEL ": Horizon (at 25 Mb/s)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) printk (":");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) // Reset the card to get everything in a known state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) printk (" reset");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) hrz_reset (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) // Clear all the buffer memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) printk (" clearing memory");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) for (mem = (HDW *) memmap; mem < (HDW *) (memmap + 1); ++mem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) wr_mem (dev, mem, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) printk (" tx channels");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
// All eight transmit channels are set up as AAL5 ABR channels with
// a 16us cell spacing. Why?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) // Channel 0 gets the free buffer at 100h, channel 1 gets the free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) // buffer at 110h etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) for (chan = 0; chan < TX_CHANS; ++chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) tx_ch_desc * tx_desc = &memmap->tx_descs[chan];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) cell_buf * buf = &memmap->inittxbufs[chan];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) // initialise the read and write buffer pointers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) wr_mem (dev, &tx_desc->rd_buf_type, BUF_PTR(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) wr_mem (dev, &tx_desc->wr_buf_type, BUF_PTR(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) // set the status of the initial buffers to empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) wr_mem (dev, &buf->next, BUFF_STATUS_EMPTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) // Use space bufn3 at the moment for tx buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) printk (" tx buffers");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) tx_desc = memmap->bufn3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) wr_mem (dev, &memmap->txfreebufstart.next, BUF_PTR(tx_desc) | BUFF_STATUS_EMPTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) for (buff_count = 0; buff_count < BUFN3_SIZE-1; buff_count++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) wr_mem (dev, &tx_desc->next, BUF_PTR(tx_desc+1) | BUFF_STATUS_EMPTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) tx_desc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) wr_mem (dev, &tx_desc->next, BUF_PTR(&memmap->txfreebufend) | BUFF_STATUS_EMPTY);
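// Resulting free list (sketch): txfreebufstart -> bufn3[0] ->
// bufn3[1] -> ... -> bufn3[BUFN3_SIZE-1] -> txfreebufend, with every
// link marked BUFF_STATUS_EMPTY.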
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) // Initialise the transmit free buffer count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) wr_regw (dev, TX_FREE_BUFFER_COUNT_OFF, BUFN3_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) printk (" rx channels");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) // Initialise all of the receive channels to be AAL5 disabled with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) // an interrupt threshold of 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) for (chan = 0; chan < RX_CHANS; ++chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) rx_ch_desc * rx_desc = &memmap->rx_descs[chan];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) wr_mem (dev, &rx_desc->wr_buf_type, CHANNEL_TYPE_AAL5 | RX_CHANNEL_DISABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) printk (" rx buffers");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) // Use space bufn4 at the moment for rx buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) rx_desc = memmap->bufn4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) wr_mem (dev, &memmap->rxfreebufstart.next, BUF_PTR(rx_desc) | BUFF_STATUS_EMPTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) for (buff_count = 0; buff_count < BUFN4_SIZE-1; buff_count++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) wr_mem (dev, &rx_desc->next, BUF_PTR(rx_desc+1) | BUFF_STATUS_EMPTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) rx_desc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) wr_mem (dev, &rx_desc->next, BUF_PTR(&memmap->rxfreebufend) | BUFF_STATUS_EMPTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) // Initialise the receive free buffer count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) wr_regw (dev, RX_FREE_BUFFER_COUNT_OFF, BUFN4_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
// Initialize the Horizon's registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) // TX config
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) wr_regw (dev, TX_CONFIG_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) ABR_ROUND_ROBIN | TX_NORMAL_OPERATION | DRVR_DRVRBAR_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
// RX config. Use 10-x VC bits, x VP bits, non-user cells in channel 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) wr_regw (dev, RX_CONFIG_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) DISCARD_UNUSED_VPI_VCI_BITS_SET | NON_USER_CELLS_IN_ONE_CHANNEL | vpi_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) // RX line config
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) wr_regw (dev, RX_LINE_CONFIG_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) LOCK_DETECT_ENABLE | FREQUENCY_DETECT_ENABLE | GXTALOUT_SELECT_DIV4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) // Set the max AAL5 cell count to be just enough to contain the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) // largest AAL5 frame that the user wants to receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) wr_regw (dev, MAX_AAL5_CELL_COUNT_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) DIV_ROUND_UP(max_rx_size + ATM_AAL5_TRAILER, ATM_CELL_PAYLOAD));
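// e.g. if max_rx_size were 9000 bytes (illustrative value only) this
// would be DIV_ROUND_UP(9000 + 8, 48) = 188 cells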
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) // Enable receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) wr_regw (dev, RX_CONFIG_OFF, rd_regw (dev, RX_CONFIG_OFF) | RX_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) printk (" control");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) // Drive the OE of the LEDs then turn the green LED on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) ctrl |= GREEN_LED_OE | YELLOW_LED_OE | GREEN_LED | YELLOW_LED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) wr_regl (dev, CONTROL_0_REG, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) // Test for a 155-capable card
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) if (onefivefive) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) // Select 155 mode... make this a choice (or: how do we detect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) // external line speed and switch?)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) ctrl |= ATM_LAYER_SELECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) wr_regl (dev, CONTROL_0_REG, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) // test SUNI-lite vs SAMBA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) // Register 0x00 in the SUNI will have some of bits 3-7 set, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) // they will always be zero for the SAMBA. Ha! Bloody hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) // engineers. It'll never work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) if (rd_framer (dev, 0) & 0x00f0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) // SUNI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) printk (" SUNI");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) // Reset, just in case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) wr_framer (dev, 0x00, 0x0080);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) wr_framer (dev, 0x00, 0x0000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) // Configure transmit FIFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) wr_framer (dev, 0x63, rd_framer (dev, 0x63) | 0x0002);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) // Set line timed mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) wr_framer (dev, 0x05, rd_framer (dev, 0x05) | 0x0001);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) // SAMBA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) printk (" SAMBA");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) // Reset, just in case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) wr_framer (dev, 0, rd_framer (dev, 0) | 0x0001);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) wr_framer (dev, 0, rd_framer (dev, 0) &~ 0x0001);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) // Turn off diagnostic loopback and enable line-timed mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) wr_framer (dev, 0, 0x0002);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) // Turn on transmit outputs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) wr_framer (dev, 2, 0x0B80);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) // Select 25 mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) ctrl &= ~ATM_LAYER_SELECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) // Madge B154 setup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) // none required?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) printk (" LEDs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) GREEN_LED_ON(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) YELLOW_LED_ON(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) printk (" ESI=");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) u16 b = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) u8 * esi = dev->atm_dev->esi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) // in the card I have, EEPROM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) // addresses 0, 1, 2 contain 0
// addresses 5, 6 etc. contain ffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) // NB: Madge prefix is 00 00 f6 (which is 00 00 6f in Ethernet bit order)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) // the read_bia routine gets the BIA in Ethernet bit order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)
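// worked example (assuming the Madge prefix above): the word read from
// EEPROM address 2 supplies esi[0] (low byte) and esi[1] (high byte),
// so it is 0x0000; the word at address 3 supplies esi[2] = 0xf6 in its
// low byte, and so on for address 4.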
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) for (i=0; i < ESI_LEN; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) if (i % 2 == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) b = read_bia (dev, i/2 + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) b = b >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) esi[i] = b & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) printk ("%02x", esi[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) // Enable RX_Q and ?X_COMPLETE interrupts only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) wr_regl (dev, INT_ENABLE_REG_OFF, INTERESTING_INTERRUPTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) printk (" IRQ on");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) printk (".\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) return onefivefive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) /********** check max_sdu **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) static int check_max_sdu (hrz_aal aal, struct atm_trafprm * tp, unsigned int max_frame_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) PRINTD (DBG_FLOW|DBG_QOS, "check_max_sdu");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) switch (aal) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) case aal0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) if (!(tp->max_sdu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) PRINTD (DBG_QOS, "defaulting max_sdu");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) tp->max_sdu = ATM_AAL0_SDU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) } else if (tp->max_sdu != ATM_AAL0_SDU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) PRINTD (DBG_QOS|DBG_ERR, "rejecting max_sdu");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) case aal34:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) if (tp->max_sdu == 0 || tp->max_sdu > ATM_MAX_AAL34_PDU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) PRINTD (DBG_QOS, "%sing max_sdu", tp->max_sdu ? "capp" : "default");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) tp->max_sdu = ATM_MAX_AAL34_PDU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) case aal5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) if (tp->max_sdu == 0 || tp->max_sdu > max_frame_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) PRINTD (DBG_QOS, "%sing max_sdu", tp->max_sdu ? "capp" : "default");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) tp->max_sdu = max_frame_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) /********** check pcr **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) // something like this should be part of ATM Linux
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) static int atm_pcr_check (struct atm_trafprm * tp, unsigned int pcr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) // we are assuming non-UBR, and non-special values of pcr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) if (tp->min_pcr == ATM_MAX_PCR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) PRINTD (DBG_QOS, "luser gave min_pcr = ATM_MAX_PCR");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) else if (tp->min_pcr < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) PRINTD (DBG_QOS, "luser gave negative min_pcr");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) else if (tp->min_pcr && tp->min_pcr > pcr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) PRINTD (DBG_QOS, "pcr less than min_pcr");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) // !! max_pcr = UNSPEC (0) is equivalent to max_pcr = MAX (-1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) // easier to #define ATM_MAX_PCR 0 and have all rates unsigned?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) // [this would get rid of next two conditionals]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) if ((0) && tp->max_pcr == ATM_MAX_PCR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) PRINTD (DBG_QOS, "luser gave max_pcr = ATM_MAX_PCR");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) else if ((tp->max_pcr != ATM_MAX_PCR) && tp->max_pcr < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) PRINTD (DBG_QOS, "luser gave negative max_pcr");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) else if (tp->max_pcr && tp->max_pcr != ATM_MAX_PCR && tp->max_pcr < pcr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) PRINTD (DBG_QOS, "pcr greater than max_pcr");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) // each limit unspecified or not violated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) PRINTD (DBG_QOS, "xBR(pcr) OK");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) PRINTD (DBG_QOS, "pcr=%u, tp: min_pcr=%d, pcr=%d, max_pcr=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) pcr, tp->min_pcr, tp->pcr, tp->max_pcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) /********** open VC **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) static int hrz_open (struct atm_vcc *atm_vcc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) u16 channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) struct atm_qos * qos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) struct atm_trafprm * txtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) struct atm_trafprm * rxtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) hrz_vcc vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) hrz_vcc * vccp; // allocated late
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) short vpi = atm_vcc->vpi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) int vci = atm_vcc->vci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) PRINTD (DBG_FLOW|DBG_VCC, "hrz_open %x %x", vpi, vci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) #ifdef ATM_VPI_UNSPEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) // UNSPEC is deprecated, remove this code eventually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) PRINTK (KERN_WARNING, "rejecting open with unspecified VPI/VCI (deprecated)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) error = vpivci_to_channel (&channel, vpi, vci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) PRINTD (DBG_WARN|DBG_VCC, "VPI/VCI out of range: %hd/%d", vpi, vci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) vcc.channel = channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) // max speed for the moment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) vcc.tx_rate = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) qos = &atm_vcc->qos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) // check AAL and remember it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) switch (qos->aal) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) case ATM_AAL0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) // we would if it were 48 bytes and not 52!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) PRINTD (DBG_QOS|DBG_VCC, "AAL0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) vcc.aal = aal0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) case ATM_AAL34:
// we would if I knew how to do the SAR!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) PRINTD (DBG_QOS|DBG_VCC, "AAL3/4");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) vcc.aal = aal34;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) case ATM_AAL5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) PRINTD (DBG_QOS|DBG_VCC, "AAL5");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) vcc.aal = aal5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) PRINTD (DBG_QOS|DBG_VCC, "Bad AAL!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) // TX traffic parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) // there are two, interrelated problems here: 1. the reservation of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) // PCR is not a binary choice, we are given bounds and/or a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) // desirable value; 2. the device is only capable of certain values,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) // most of which are not integers. It is almost certainly acceptable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) // to be off by a maximum of 1 to 10 cps.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) // Pragmatic choice: always store an integral PCR as that which has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) // been allocated, even if we allocate a little (or a lot) less,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) // after rounding. The actual allocation depends on what we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) // manage with our rate selection algorithm. The rate selection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) // algorithm is given an integral PCR and a tolerance and told
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) // whether it should round the value up or down if the tolerance is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) // exceeded; it returns: a) the actual rate selected (rounded up to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) // the nearest integer), b) a bit pattern to feed to the timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) // register, and c) a failure value if no applicable rate exists.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) // Part of the job is done by atm_pcr_goal which gives us a PCR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) // specification which says: EITHER grab the maximum available PCR
// (and perhaps a lower bound which we mustn't pass), OR grab this
// amount, rounding down if you have to (and perhaps a lower bound
// which we mustn't pass) OR grab this amount, rounding up if you
// have to (and perhaps an upper bound which we mustn't pass). If any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) // bounds ARE passed we fail. Note that rounding is only rounding to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) // match device limitations, we do not round down to satisfy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) // bandwidth availability even if this would not violate any given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) // lower bound.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) // Note: telephony = 64kb/s = 48 byte cell payload @ 500/3 cells/s
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) // (say) so this is not even a binary fixpoint cell rate (but this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) // device can do it). To avoid this sort of hassle we use a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) // tolerance parameter (currently fixed at 10 cps).
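// worked example (sketch only): the telephony case above needs
// 64000/8 = 8000 bytes/s, i.e. 8000/48 = 500/3 = 166.67 cells/s; with
// the 10 cps tolerance, any selectable timer rate within about 10
// cells/s of that is accepted instead of failing the open.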
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) PRINTD (DBG_QOS, "TX:");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) txtp = &qos->txtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) // set up defaults for no traffic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) vcc.tx_rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) // who knows what would actually happen if you try and send on this?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) vcc.tx_xbr_bits = IDLE_RATE_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) vcc.tx_pcr_bits = CLOCK_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) vcc.tx_scr_bits = CLOCK_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) vcc.tx_bucket_bits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) if (txtp->traffic_class != ATM_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) error = check_max_sdu (vcc.aal, txtp, max_tx_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) PRINTD (DBG_QOS, "TX max_sdu check failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) switch (txtp->traffic_class) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) case ATM_UBR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) // we take "the PCR" as a rate-cap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) // not reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) vcc.tx_rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) make_rate (dev, 1<<30, round_nearest, &vcc.tx_pcr_bits, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) vcc.tx_xbr_bits = ABR_RATE_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) case ATM_ABR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) // reserve min, allow up to max
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) vcc.tx_rate = 0; // ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) make_rate (dev, 1<<30, round_nearest, &vcc.tx_pcr_bits, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) vcc.tx_xbr_bits = ABR_RATE_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) case ATM_CBR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) int pcr = atm_pcr_goal (txtp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) rounding r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) if (!pcr) {
// down vs. up, remaining bandwidth vs. unlimited bandwidth!!
// should really arrange that once someone gets unlimited
// bandwidth, no more non-UBR channels can be opened until the
// unlimited one closes?? For the moment, round_down means
// greedy people actually get something and not nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) r = round_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) // slight race (no locking) here so we may get -EAGAIN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) // later; the greedy bastards would deserve it :)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) PRINTD (DBG_QOS, "snatching all remaining TX bandwidth");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) pcr = dev->tx_avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) } else if (pcr < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) r = round_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) pcr = -pcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) r = round_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) error = make_rate_with_tolerance (dev, pcr, r, 10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) &vcc.tx_pcr_bits, &vcc.tx_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) PRINTD (DBG_QOS, "could not make rate from TX PCR");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) // not really clear what further checking is needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) error = atm_pcr_check (txtp, vcc.tx_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) PRINTD (DBG_QOS, "TX PCR failed consistency check");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) vcc.tx_xbr_bits = CBR_RATE_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) case ATM_VBR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) int pcr = atm_pcr_goal (txtp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) // int scr = atm_scr_goal (txtp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) int scr = pcr/2; // just for fun
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) unsigned int mbs = 60; // just for fun
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) rounding pr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) rounding sr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) unsigned int bucket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) if (!pcr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) pr = round_nearest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) pcr = 1<<30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) } else if (pcr < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) pr = round_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) pcr = -pcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) pr = round_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) error = make_rate_with_tolerance (dev, pcr, pr, 10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) &vcc.tx_pcr_bits, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) if (!scr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) // see comments for PCR with CBR above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) sr = round_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) // slight race (no locking) here so we may get -EAGAIN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) // later; the greedy bastards would deserve it :)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) PRINTD (DBG_QOS, "snatching all remaining TX bandwidth");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) scr = dev->tx_avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) } else if (scr < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) sr = round_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) scr = -scr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) sr = round_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) error = make_rate_with_tolerance (dev, scr, sr, 10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) &vcc.tx_scr_bits, &vcc.tx_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) PRINTD (DBG_QOS, "could not make rate from TX SCR");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) // not really clear what further checking is needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) // error = atm_scr_check (txtp, vcc.tx_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) PRINTD (DBG_QOS, "TX SCR failed consistency check");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) // bucket calculations (from a piece of paper...) cell bucket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) // capacity must be largest integer smaller than m(p-s)/p + 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) // where m = max burst size, p = pcr, s = scr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) bucket = mbs*(pcr-scr)/pcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) if (bucket*pcr != mbs*(pcr-scr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) bucket += 1;
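// e.g. mbs = 60, pcr = 90, scr = 50 (illustrative values only):
// 60*40/90 truncates to 26, which does not divide exactly, so
// bucket becomes 27 = ceil(m(p-s)/p)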
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) if (bucket > BUCKET_MAX_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) PRINTD (DBG_QOS, "shrinking bucket from %u to %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) bucket, BUCKET_MAX_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) bucket = BUCKET_MAX_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) vcc.tx_xbr_bits = VBR_RATE_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) vcc.tx_bucket_bits = bucket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) default: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) PRINTD (DBG_QOS, "unsupported TX traffic class");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) // RX traffic parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) PRINTD (DBG_QOS, "RX:");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) rxtp = &qos->rxtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) // set up defaults for no traffic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) vcc.rx_rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) if (rxtp->traffic_class != ATM_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) error = check_max_sdu (vcc.aal, rxtp, max_rx_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) PRINTD (DBG_QOS, "RX max_sdu check failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) switch (rxtp->traffic_class) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) case ATM_UBR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) // not reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) case ATM_ABR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) // reserve min
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) vcc.rx_rate = 0; // ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) case ATM_CBR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) int pcr = atm_pcr_goal (rxtp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) if (!pcr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) // slight race (no locking) here so we may get -EAGAIN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) // later; the greedy bastards would deserve it :)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) PRINTD (DBG_QOS, "snatching all remaining RX bandwidth");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) pcr = dev->rx_avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) } else if (pcr < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) pcr = -pcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) vcc.rx_rate = pcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) // not really clear what further checking is needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) error = atm_pcr_check (rxtp, vcc.rx_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) PRINTD (DBG_QOS, "RX PCR failed consistency check");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) case ATM_VBR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) // int scr = atm_scr_goal (rxtp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) int scr = 1<<16; // just for fun
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) if (!scr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) // slight race (no locking) here so we may get -EAGAIN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) // later; the greedy bastards would deserve it :)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) PRINTD (DBG_QOS, "snatching all remaining RX bandwidth");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) scr = dev->rx_avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) } else if (scr < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) scr = -scr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) vcc.rx_rate = scr;
	// not really clear what further checking is needed
	// error = atm_scr_check (rxtp, vcc.rx_rate);
	// NB: error is known to be zero here unless the check above is re-enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) PRINTD (DBG_QOS, "RX SCR failed consistency check");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) default: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) PRINTD (DBG_QOS, "unsupported RX traffic class");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) // late abort useful for diagnostics
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) if (vcc.aal != aal5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) PRINTD (DBG_QOS, "AAL not supported");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) // get space for our vcc stuff and copy parameters into it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) vccp = kmalloc (sizeof(hrz_vcc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) if (!vccp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) PRINTK (KERN_ERR, "out of memory!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) *vccp = vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) // clear error and grab cell rate resource lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) spin_lock (&dev->rate_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) if (vcc.tx_rate > dev->tx_avail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) PRINTD (DBG_QOS, "not enough TX PCR left");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) error = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) if (vcc.rx_rate > dev->rx_avail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) PRINTD (DBG_QOS, "not enough RX PCR left");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) error = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) // really consume cell rates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) dev->tx_avail -= vcc.tx_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) dev->rx_avail -= vcc.rx_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) PRINTD (DBG_QOS|DBG_VCC, "reserving %u TX PCR and %u RX PCR",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) vcc.tx_rate, vcc.rx_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) // release lock and exit on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) spin_unlock (&dev->rate_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) PRINTD (DBG_QOS|DBG_VCC, "insufficient cell rate resources");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) kfree (vccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) // this is "immediately before allocating the connection identifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) // in hardware" - so long as the next call does not fail :)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) set_bit(ATM_VF_ADDR,&atm_vcc->flags);
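  // (summary of the generic ATM-layer convention, not Horizon-specific:
  // ATM_VF_ADDR tells the core this VPI/VCI is claimed by the device;
  // ATM_VF_READY, set just before returning success below, marks the VC
  // as fully configured - hrz_close clears them again in reverse order)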
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) // any errors here are very serious and should never occur
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) if (rxtp->traffic_class != ATM_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) if (dev->rxer[channel]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) PRINTD (DBG_ERR|DBG_VCC, "VC already open for RX");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) error = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) error = hrz_open_rx (dev, channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) kfree (vccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) // this link allows RX frames through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) dev->rxer[channel] = atm_vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) // success, set elements of atm_vcc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) atm_vcc->dev_data = (void *) vccp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) // indicate readiness
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) set_bit(ATM_VF_READY,&atm_vcc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) /********** close VC **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) static void hrz_close (struct atm_vcc * atm_vcc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) hrz_vcc * vcc = HRZ_VCC(atm_vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) u16 channel = vcc->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) PRINTD (DBG_VCC|DBG_FLOW, "hrz_close");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) // indicate unreadiness
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) clear_bit(ATM_VF_READY,&atm_vcc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) if (atm_vcc->qos.txtp.traffic_class != ATM_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) // let any TX on this channel that has started complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) // no restart, just keep trying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) while (tx_hold (dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) // remove record of any tx_channel having been setup for this channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) for (i = 0; i < TX_CHANS; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) if (dev->tx_channel_record[i] == channel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) dev->tx_channel_record[i] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) if (dev->last_vc == channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) dev->tx_last = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) tx_release (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) // disable RXing - it tries quite hard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) hrz_close_rx (dev, channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) // forget the vcc - no more skbs will be pushed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) if (atm_vcc != dev->rxer[channel])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) PRINTK (KERN_ERR, "%s atm_vcc=%p rxer[channel]=%p",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) "arghhh! we're going to die!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) atm_vcc, dev->rxer[channel]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) dev->rxer[channel] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) // atomically release our rate reservation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) spin_lock (&dev->rate_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) PRINTD (DBG_QOS|DBG_VCC, "releasing %u TX PCR and %u RX PCR",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) vcc->tx_rate, vcc->rx_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) dev->tx_avail += vcc->tx_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) dev->rx_avail += vcc->rx_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) spin_unlock (&dev->rate_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) // free our structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) kfree (vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) // say the VPI/VCI is free again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) clear_bit(ATM_VF_ADDR,&atm_vcc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) static int hrz_ioctl (struct atm_dev * atm_dev, unsigned int cmd, void *arg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) hrz_dev * dev = HRZ_DEV(atm_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) PRINTD (DBG_FLOW, "hrz_ioctl");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536)
static unsigned char hrz_phy_get (struct atm_dev * atm_dev, unsigned long addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) hrz_dev * dev = HRZ_DEV(atm_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) PRINTD (DBG_FLOW, "hrz_phy_get");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) static void hrz_phy_put (struct atm_dev * atm_dev, unsigned char value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) unsigned long addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) hrz_dev * dev = HRZ_DEV(atm_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) PRINTD (DBG_FLOW, "hrz_phy_put");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) static int hrz_change_qos (struct atm_vcc * atm_vcc, struct atm_qos *qos, int flgs) {
  hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) PRINTD (DBG_FLOW, "hrz_change_qos");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) /********** proc file contents **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) static int hrz_proc_read (struct atm_dev * atm_dev, loff_t * pos, char * page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) hrz_dev * dev = HRZ_DEV(atm_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) int left = *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) PRINTD (DBG_FLOW, "hrz_proc_read");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) /* more diagnostics here? */
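  /* the ATM proc code calls this repeatedly with *pos = 0, 1, 2, ...;
     each "if (!left--)" below emits exactly one line per call and
     returning 0 marks the end of the output */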
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) if (!left--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) unsigned int count = sprintf (page, "vbr buckets:");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) for (i = 0; i < TX_CHANS; ++i)
      count += sprintf (page+count, " %u/%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) query_tx_channel_config (dev, i, BUCKET_FULLNESS_ACCESS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) query_tx_channel_config (dev, i, BUCKET_CAPACITY_ACCESS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) count += sprintf (page+count, ".\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) if (!left--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) return sprintf (page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) "cells: TX %lu, RX %lu, HEC errors %lu, unassigned %lu.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) dev->tx_cell_count, dev->rx_cell_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) dev->hec_error_count, dev->unassigned_cell_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) if (!left--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) return sprintf (page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) "free cell buffers: TX %hu, RX %hu+%hu.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) rd_regw (dev, TX_FREE_BUFFER_COUNT_OFF),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) rd_regw (dev, RX_FREE_BUFFER_COUNT_OFF),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) dev->noof_spare_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) if (!left--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) return sprintf (page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) "cps remaining: TX %u, RX %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) dev->tx_avail, dev->rx_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) static const struct atmdev_ops hrz_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) .open = hrz_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) .close = hrz_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) .send = hrz_send,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) .proc_read = hrz_proc_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) static int hrz_probe(struct pci_dev *pci_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) const struct pci_device_id *pci_ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) hrz_dev * dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) // adapter slot free, read resources from PCI configuration space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) u32 iobase = pci_resource_start (pci_dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) u32 * membase = bus_to_virt (pci_resource_start (pci_dev, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) unsigned int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) unsigned char lat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) PRINTD (DBG_FLOW, "hrz_probe");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) if (pci_enable_device(pci_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) /* XXX DEV_LABEL is a guess */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) if (!request_region(iobase, HRZ_IO_EXTENT, DEV_LABEL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) goto out_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) dev = kzalloc(sizeof(hrz_dev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) // perhaps we should be nice: deregister all adapters and abort?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) PRINTD(DBG_ERR, "out of memory");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) goto out_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) pci_set_drvdata(pci_dev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) // grab IRQ and install handler - move this someplace more sensible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) irq = pci_dev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) if (request_irq(irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) interrupt_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) IRQF_SHARED, /* irqflags guess */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) DEV_LABEL, /* name guess */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) PRINTD(DBG_WARN, "request IRQ failed!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) PRINTD(DBG_INFO, "found Madge ATM adapter (hrz) at: IO %x, IRQ %u, MEM %p",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) iobase, irq, membase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) dev->atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &hrz_ops, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) if (!(dev->atm_dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) PRINTD(DBG_ERR, "failed to register Madge ATM adapter");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) goto out_free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) PRINTD(DBG_INFO, "registered Madge ATM adapter (no. %d) (%p) at %p",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) dev->atm_dev->number, dev, dev->atm_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) dev->atm_dev->dev_data = (void *) dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) dev->pci_dev = pci_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) // enable bus master accesses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) pci_set_master(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) // frobnicate latency (upwards, usually)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &lat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) if (pci_lat) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) PRINTD(DBG_INFO, "%s PCI latency timer from %hu to %hu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) "changing", lat, pci_lat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, pci_lat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) } else if (lat < MIN_PCI_LATENCY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) PRINTK(KERN_INFO, "%s PCI latency timer from %hu to %hu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) "increasing", lat, MIN_PCI_LATENCY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, MIN_PCI_LATENCY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) dev->iobase = iobase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) dev->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) dev->membase = membase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) dev->rx_q_entry = dev->rx_q_reset = &memmap->rx_q_entries[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) dev->rx_q_wrap = &memmap->rx_q_entries[RX_CHANS-1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) // these next three are performance hacks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) dev->last_vc = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) dev->tx_last = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) dev->tx_idle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) dev->tx_regions = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) dev->tx_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) dev->tx_skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) dev->tx_iovec = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) dev->tx_cell_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) dev->rx_cell_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) dev->hec_error_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) dev->unassigned_cell_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) dev->noof_spare_buffers = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) for (i = 0; i < TX_CHANS; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) dev->tx_channel_record[i] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) dev->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) // Allocate cell rates and remember ASIC version
  // Fibre: ATM_OC3_PCR = 155520000/8/270*260/53 - 29/53
  // Copper: (WRONG) we want roughly 1/6 of the above, close to 25Mb/s
  // Copper: (plagiarise!) 25600000/8/270*260/53 - n/53
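  // rough arithmetic behind the two cases (a sketch, not from the docs):
  //   fibre:  155520000 bit/s * 260/270 / (8*53) ~= 353207 cells/s (ATM_OC3_PCR)
  //   copper:  25600000 bit/s * 26/27   / (8*53) ~=  58141 cells/s
  // which match the values assigned below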
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) if (hrz_init(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) // to be really pedantic, this should be ATM_OC3c_PCR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) dev->tx_avail = ATM_OC3_PCR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) dev->rx_avail = ATM_OC3_PCR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) set_bit(ultra, &dev->flags); // NOT "|= ultra" !
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) dev->tx_avail = ((25600000/8)*26)/(27*53);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) dev->rx_avail = ((25600000/8)*26)/(27*53);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) PRINTD(DBG_WARN, "Buggy ASIC: no TX bus-mastering.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) // rate changes spinlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) spin_lock_init(&dev->rate_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) // on-board memory access spinlock; we want atomic reads and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) // writes to adapter memory (handles IRQ and SMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) spin_lock_init(&dev->mem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) init_waitqueue_head(&dev->tx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)
  // vpi_bits in 0..4, so vci_bits = 10 - vpi_bits is in 6..10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) dev->atm_dev->ci_range.vpi_bits = vpi_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) dev->atm_dev->ci_range.vci_bits = 10-vpi_bits;
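  // e.g. vpi_bits = 0 gives 1 VPI and 1024 VCIs, vpi_bits = 2 gives
  // 4 VPIs and 256 VCIs (illustrative splits only)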
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) timer_setup(&dev->housekeeping, do_housekeeping, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) mod_timer(&dev->housekeeping, jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) out_free_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) free_irq(irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) kfree(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) out_release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) release_region(iobase, HRZ_IO_EXTENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) out_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) pci_disable_device(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) static void hrz_remove_one(struct pci_dev *pci_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) hrz_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) dev = pci_get_drvdata(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) PRINTD(DBG_INFO, "closing %p (atm_dev = %p)", dev, dev->atm_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) del_timer_sync(&dev->housekeeping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) hrz_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) atm_dev_deregister(dev->atm_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) release_region(dev->iobase, HRZ_IO_EXTENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) kfree(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) pci_disable_device(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) static void __init hrz_check_args (void) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) #ifdef DEBUG_HORIZON
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) PRINTK (KERN_NOTICE, "debug bitmap is %hx", debug &= DBG_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) if (debug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) PRINTK (KERN_NOTICE, "no debug support in this image");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) if (vpi_bits > HRZ_MAX_VPI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) PRINTK (KERN_ERR, "vpi_bits has been limited to %hu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) vpi_bits = HRZ_MAX_VPI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) if (max_tx_size < 0 || max_tx_size > TX_AAL5_LIMIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) PRINTK (KERN_NOTICE, "max_tx_size has been limited to %hu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) max_tx_size = TX_AAL5_LIMIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) if (max_rx_size < 0 || max_rx_size > RX_AAL5_LIMIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) PRINTK (KERN_NOTICE, "max_rx_size has been limited to %hu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) max_rx_size = RX_AAL5_LIMIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) MODULE_AUTHOR(maintainer_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) MODULE_DESCRIPTION(description_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) module_param(debug, ushort, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) module_param(vpi_bits, ushort, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) module_param(max_tx_size, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) module_param(max_rx_size, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) module_param(pci_lat, byte, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) MODULE_PARM_DESC(debug, "debug bitmap, see .h file");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) MODULE_PARM_DESC(vpi_bits, "number of bits (0..4) to allocate to VPIs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) MODULE_PARM_DESC(max_tx_size, "maximum size of TX AAL5 frames");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) MODULE_PARM_DESC(max_rx_size, "maximum size of RX AAL5 frames");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles");
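/* illustrative load-time usage (all parameter values are made up):
 *   modprobe horizon vpi_bits=2 max_tx_size=9180 max_rx_size=9180 pci_lat=64
 */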
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) static const struct pci_device_id hrz_pci_tbl[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) { PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_HORIZON, PCI_ANY_ID, PCI_ANY_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) { 0, }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) MODULE_DEVICE_TABLE(pci, hrz_pci_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) static struct pci_driver hrz_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) .name = "horizon",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) .probe = hrz_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) .remove = hrz_remove_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) .id_table = hrz_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) /********** module entry **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) static int __init hrz_module_init (void) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) BUILD_BUG_ON(sizeof(struct MEMMAP) != 128*1024/4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) show_version();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) // check arguments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) hrz_check_args();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) // get the juice
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) return pci_register_driver(&hrz_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) /********** module exit **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) static void __exit hrz_module_exit (void) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) PRINTD (DBG_FLOW, "cleanup_module");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) pci_unregister_driver(&hrz_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) module_init(hrz_module_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) module_exit(hrz_module_exit);