// SPDX-License-Identifier: GPL-2.0-or-later
/*
  Madge Ambassador ATM Adapter driver.
  Copyright (C) 1995-1999 Madge Networks Ltd.

*/

/* * dedicated to the memory of Graham Gordon 1971-1998 * */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/atmdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/poison.h>
#include <linux/bitrev.h>
#include <linux/mutex.h>
#include <linux/firmware.h>
#include <linux/ihex.h>
#include <linux/slab.h>

#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include "ambassador.h"

#define maintainer_string "Giuliano Procida at Madge Networks <gprocida@madge.com>"
#define description_string "Madge ATM Ambassador driver"
#define version_string "1.2.4"

static inline void __init show_version (void) {
  printk ("%s version %s\n", description_string, version_string);
}

/*

  Theory of Operation

  I Hardware, detection, initialisation and shutdown.

  1. Supported Hardware

  This driver is for the PCI ATMizer-based Ambassador card (except
  very early versions). It is not suitable for the similar EISA "TR7"
  card. Commercially, both cards are known as Collage Server ATM
  adapters.

  The loader supports image transfer to the card, image start and a
  few other miscellaneous commands.

  Only AAL5 is supported with vpi = 0 and vci in the range 0 to 1023.

  The cards are big-endian.

  2. Detection

  Standard PCI stuff, the early cards are detected and rejected.

  3. Initialisation

  The cards are reset and the self-test results are checked. The
  microcode image is then transferred and started. The microcode then
  waits for a pointer to a descriptor containing details of the
  host-based queues and buffers, various parameters, etc. Once these
  have been processed, normal operations may begin. The BIA is read
  using a microcode command.

  4. Shutdown

  This may be accomplished either by a card reset or via the microcode
  shutdown command. Further investigation required.

  5. Persistent state

  The card reset does not affect PCI configuration (good) or the
  contents of several other "shared run-time registers" (bad) which
  include doorbell and interrupt control as well as EEPROM and PCI
  control. The driver must be careful when modifying these registers
  not to touch bits it does not use and to undo any changes at exit.
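
  As a minimal sketch of the read-modify-write discipline this implies
  (interrupts_on() at the end of this file uses exactly this pattern;
  AMB_INTERRUPT_BITS is the driver's own mask):

    // set only the bits we own, leaving the rest of the shared
    // register intact so that exit code can restore the old state
    wr_plain (dev, offsetof(amb_mem, interrupt_control),
              rd_plain (dev, offsetof(amb_mem, interrupt_control))
              | AMB_INTERRUPT_BITS);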

  II Driver software

  0. Generalities

  The adapter is quite intelligent (fast) and has a simple interface
  (few features). VPI is always zero, 1024 VCIs are supported. There
  is limited cell rate support. UBR channels can be capped and ABR
  (explicit rate, but not EFCI) is supported. There is no CBR or VBR
  support.

  1. Driver <-> Adapter Communication

  Apart from the basic loader commands, the driver communicates
  through three entities: the command queue (CQ), the transmit queue
  pair (TXQ) and the receive queue pairs (RXQ). These three entities
  are set up by the host and passed to the microcode just after it has
  been started.

  All queues are host-based circular queues. They are contiguous and
  (due to hardware limitations) have some restrictions as to their
  locations in (bus) memory. They are of the "full means the same as
  empty so don't do that" variety, since the adapter uses pointers
  internally.

  The queue pairs work as follows: one queue is for supply to the
  adapter, items in it are pending and are owned by the adapter; the
  other is the queue for return from the adapter, items in it have
  been dealt with by the adapter. The host adds items to the supply
  (TX descriptors and free RX buffer descriptors) and removes items
  from the return (TX and RX completions). The adapter deals with
  out-of-order completions.

  Interrupts (card to host) and the doorbell (host to card) are used
  for signalling.
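
  A minimal sketch of the host side of this protocol (the names mirror
  the tx_give/tx_take code later in this file; locking, doorbell and
  error handling are omitted here):

    // give: copy the item into the supply queue and advance "in"
    if (q->pending < q->maximum) {
      *q->in.ptr = *item;
      q->pending++;
      q->in.ptr = NEXTQ (q->in.ptr, q->in.start, q->in.limit);
      // then tell the adapter (mailbox write and doorbell)
    }
    // take: if the adapter has filled the next return slot, consume
    // it, mark the slot unused again and advance "out"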

  2. CQ

  This is used to communicate "open VC", "close VC", "get stats" etc.
  to the adapter. At most one command is retired every millisecond by
  the card. There is no out-of-order completion or notification. The
  driver needs to check the return code of the command, waiting as
  appropriate.

  3. TXQ

  TX supply items are of variable length (scatter-gather support) and
  so the queue items are (more or less) pointers to the real thing.
  Each TX supply item contains a unique, host-supplied handle (the skb
  bus address seems most sensible, as this works for Alphas as well;
  there is no need to do any endian conversions on the handles).

  TX return items consist of just the handles above.

  4. RXQ (up to 4 of these with different lengths and buffer sizes)

  RX supply items consist of a unique, host-supplied handle (the skb
  bus address again) and a pointer to the buffer data area.

  RX return items consist of the handle above, the VC, length and a
  status word. This just screams "oh so easy", doesn't it?

  Note on RX pool sizes:

  Each pool should have enough buffers to handle a back-to-back stream
  of minimum sized frames on a single VC. For example:

    frame spacing = 3us (about right)

    delay = IRQ lat + RX handling + RX buffer replenish = 20 (us) (a guess)

    min number of buffers for one VC = 1 + delay/spacing (buffers)

    delay/spacing = latency = (20+2)/3 = 7 (buffers) (integer division, rounding up)

  The 20us delay assumes that there is no need to sleep; if we need to
  sleep to get buffers we are going to drop frames anyway.

  In fact, each pool should have enough buffers to support the
  simultaneous reassembly of a separate frame on each VC and cope with
  the case in which frames complete in round-robin cell fashion on
  each VC.

  Only one frame can complete at each cell arrival, so if "n" VCs are
  open, the worst case is to have them all complete frames together
  followed by all starting new frames together.

    desired number of buffers = n + delay/spacing

  These are the extreme requirements; however, they are "n+k" for some
  "k", so we have only the constant to choose. This is the argument
  rx_lats, which currently defaults to 7.

  Actually, "n ? n+k : 0" is better and this is what is implemented,
  subject to the limit given by the pool size.
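
  A worked sketch of that sizing rule (illustrative only; the real
  driver applies it as VCs are opened and closed, clamped to the pool
  size given by the rxs array below):

    // buffers wanted for one pool, given "open" VCs using that pool
    // and the rx_lats latency allowance (default 7)
    unsigned int wanted = open ? open + rx_lats : 0;
    // e.g. 3 open VCs -> 3 + 7 = 10 buffers, capped at rxs[pool]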

  5. Driver locking

  Simple spinlocks are used around the TX and RX queue mechanisms.
  Anyone with a faster, working method is welcome to implement it.

  The adapter command queue is protected with a spinlock. We always
  wait for commands to complete.

  A more complex form of locking is used around parts of the VC open
  and close functions. There are three reasons for a lock: 1. we need
  to do atomic rate reservation and release (not used yet); 2. opening
  sometimes involves two adapter commands which must not be separated
  by another command on the same VC; 3. the changes to RX pool size
  must be atomic. The lock needs to work over context switches, so we
  use a mutex.
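
  A minimal sketch of that open-path locking (the lock and field names
  here are illustrative assumptions, not necessarily the driver's own):

    // serialise the two-command open sequence and the pool-size update
    mutex_lock (&dev->vc_lock);
    // ... issue the "open VC" command(s), adjust buffers wanted ...
    mutex_unlock (&dev->vc_lock);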

  III Hardware Features and Microcode Bugs

  1. Byte Ordering

  *%^"$&%^$*&^"$(%^$#&^%$(&#%$*(&^#%!"!"!*!
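
  In practice, every value shared with the card goes through an
  explicit conversion; a minimal sketch (this is the same pattern used
  by wr_mem/rd_mem below):

    __be32 wire = cpu_to_be32 (value);  // host -> card order
    value = be32_to_cpu (wire);         // card -> host order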

  2. Memory access

  All structures that are not accessed using DMA must be 4-byte
  aligned (not a problem) and must not cross 4MB boundaries.

  There is a DMA memory hole at E0000000-E00000FF (groan).

  TX fragments (DMA read) must not cross 4MB boundaries (would be 16MB
  but for a hardware bug).

  RX buffers (DMA write) must not cross 16MB boundaries and must
  include spare trailing bytes up to the next 4-byte boundary; they
  will be written with rubbish.

  The PLX likes to prefetch; if reading up to 4 u32 past the end of
  each TX fragment is not a problem, then TX can be made to go a
  little faster by passing a flag at init that disables a prefetch
  workaround. We do not pass this flag. (new microcode only)

  Now we:
  . Note that alloc_skb rounds up size to a 16-byte boundary.
  . Ensure all areas do not traverse 4MB boundaries.
  . Ensure all areas do not start at an E00000xx bus address.
    (I cannot be certain, but this may always hold with Linux)
  . Make all failures cause a loud message.
  . Discard non-conforming SKBs (causes TX failure or RX fill delay).
  . Discard non-conforming TX fragment descriptors (the TX fails).
  In the future we could:
  . Allow RX areas that traverse 4MB (but not 16MB) boundaries.
  . Segment TX areas into some/more fragments, when necessary (see the
    sketch below).
  . Relax checks for non-DMA items (ignore hole).
  . Give scatter-gather (iovec) requirements using ???. (?)
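
  A sketch of the boundary arithmetic such splitting would use (purely
  illustrative; check_area() below applies the same test to whole
  areas rather than splitting them):

    // a fragment crosses a 4MB line iff its first and last bus
    // addresses differ above bit 21
    u32 first = addr, last = addr + len - 1;
    if ((first ^ last) & (-1 << 22)) {
      // split at the boundary: the first piece runs up to it...
      u32 split = (first | ~(-1 << 22)) + 1;
      // ...and the second piece starts at "split" with the remainder
    }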

  3. VC close is broken (only for new microcode)

  The VC close adapter microcode command fails to do anything if any
  frames have been received on the VC but none have been transmitted.
  Frames continue to be reassembled and passed (with IRQ) to the
  driver.

  IV To Do List

  . Fix bugs!

  . Timer code may be broken.

  . Deal with buggy VC close (somehow) in microcode 12.

  . Handle interrupted and/or non-blocking writes - is this a job for
    the protocol layer?

  . Add code to break up TX fragments when they span 4MB boundaries.

  . Add SUNI phy layer (need to know where SUNI lives on card).

  . Implement a tx_alloc fn to (a) satisfy TX alignment etc. and (b)
    leave extra headroom space for Ambassador TX descriptors.

  . Understand these elements of struct atm_vcc: recvq (proto?),
    sleep, callback, listenq, backlog_quota, reply and user_back.

  . Adjust TX/RX skb allocation to favour IP with LANE/CLIP (configurable).

  . Impose a TX-pending limit (2?) on each VC to help avoid TX queue overflow.

  . Decide whether RX buffer recycling is or can be made completely safe;
    turn it back on. It looks like Werner is going to axe this.

  . Implement QoS changes on open VCs (involves extracting parts of VC open
    and close into separate functions and using them to make changes).

  . Hack on command queue so that someone can issue multiple commands and wait
    on the last one (OR only "no-op" or "wait" commands are waited for).

  . Eliminate the need for while-schedule around command_do.

*/

static void do_housekeeping (struct timer_list *t);

/********** globals **********/

static unsigned short debug = 0;
static unsigned int cmds = 8;
static unsigned int txs = 32;
static unsigned int rxs[NUM_RX_POOLS] = { 64, 64, 64, 64 };
static unsigned int rxs_bs[NUM_RX_POOLS] = { 4080, 12240, 36720, 65535 };
static unsigned int rx_lats = 7;
static unsigned char pci_lat = 0;

static const unsigned long onegigmask = -1 << 30;

/********** access to adapter **********/

static inline void wr_plain (const amb_dev * dev, size_t addr, u32 data) {
  PRINTD (DBG_FLOW|DBG_REGS, "wr: %08zx <- %08x", addr, data);
#ifdef AMB_MMIO
  dev->membase[addr / sizeof(u32)] = data;
#else
  outl (data, dev->iobase + addr);
#endif
}

static inline u32 rd_plain (const amb_dev * dev, size_t addr) {
#ifdef AMB_MMIO
  u32 data = dev->membase[addr / sizeof(u32)];
#else
  u32 data = inl (dev->iobase + addr);
#endif
  PRINTD (DBG_FLOW|DBG_REGS, "rd: %08zx -> %08x", addr, data);
  return data;
}

static inline void wr_mem (const amb_dev * dev, size_t addr, u32 data) {
  __be32 be = cpu_to_be32 (data);
  PRINTD (DBG_FLOW|DBG_REGS, "wr: %08zx <- %08x b[%08x]", addr, data, be);
#ifdef AMB_MMIO
  dev->membase[addr / sizeof(u32)] = be;
#else
  outl (be, dev->iobase + addr);
#endif
}

static inline u32 rd_mem (const amb_dev * dev, size_t addr) {
#ifdef AMB_MMIO
  __be32 be = dev->membase[addr / sizeof(u32)];
#else
  __be32 be = inl (dev->iobase + addr);
#endif
  u32 data = be32_to_cpu (be);
  PRINTD (DBG_FLOW|DBG_REGS, "rd: %08zx -> %08x b[%08x]", addr, data, be);
  return data;
}

/********** dump routines **********/

static inline void dump_registers (const amb_dev * dev) {
#ifdef DEBUG_AMBASSADOR
  if (debug & DBG_REGS) {
    size_t i;
    PRINTD (DBG_REGS, "reading PLX control: ");
    for (i = 0x00; i < 0x30; i += sizeof(u32))
      rd_mem (dev, i);
    PRINTD (DBG_REGS, "reading mailboxes: ");
    for (i = 0x40; i < 0x60; i += sizeof(u32))
      rd_mem (dev, i);
    PRINTD (DBG_REGS, "reading doorb irqev irqen reset:");
    for (i = 0x60; i < 0x70; i += sizeof(u32))
      rd_mem (dev, i);
  }
#else
  (void) dev;
#endif
  return;
}

static inline void dump_loader_block (volatile loader_block * lb) {
#ifdef DEBUG_AMBASSADOR
  unsigned int i;
  PRINTDB (DBG_LOAD, "lb @ %p; res: %d, cmd: %d, pay:",
           lb, be32_to_cpu (lb->result), be32_to_cpu (lb->command));
  for (i = 0; i < MAX_COMMAND_DATA; ++i)
    PRINTDM (DBG_LOAD, " %08x", be32_to_cpu (lb->payload.data[i]));
  PRINTDE (DBG_LOAD, ", vld: %08x", be32_to_cpu (lb->valid));
#else
  (void) lb;
#endif
  return;
}

static inline void dump_command (command * cmd) {
#ifdef DEBUG_AMBASSADOR
  unsigned int i;
  PRINTDB (DBG_CMD, "cmd @ %p, req: %08x, pars:",
           cmd, /*be32_to_cpu*/ (cmd->request));
  for (i = 0; i < 3; ++i)
    PRINTDM (DBG_CMD, " %08x", /*be32_to_cpu*/ (cmd->args.par[i]));
  PRINTDE (DBG_CMD, "");
#else
  (void) cmd;
#endif
  return;
}

static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * skb) {
#ifdef DEBUG_AMBASSADOR
  unsigned int i;
  unsigned char * data = skb->data;
  PRINTDB (DBG_DATA, "%s(%u) ", prefix, vc);
  for (i = 0; i < skb->len && i < 256; i++)
    PRINTDM (DBG_DATA, "%02x ", data[i]);
  PRINTDE (DBG_DATA, "");
#else
  (void) prefix;
  (void) vc;
  (void) skb;
#endif
  return;
}

/********** check memory areas for use by Ambassador **********/

/* see limitations under Hardware Features */

static int check_area (void * start, size_t length) {
  // assumes length > 0
  const u32 fourmegmask = -1 << 22;
  const u32 twofivesixmask = -1 << 8;
  const u32 starthole = 0xE0000000;
  u32 startaddress = virt_to_bus (start);
  u32 lastaddress = startaddress + length - 1;
  // the XOR test catches any area crossing a 4MB boundary; the mask
  // test catches areas starting in the E00000xx DMA hole
  if ((startaddress ^ lastaddress) & fourmegmask ||
      (startaddress & twofivesixmask) == starthole) {
    PRINTK (KERN_ERR, "check_area failure: [%x,%x] - mail maintainer!",
            startaddress, lastaddress);
    return -1;
  } else {
    return 0;
  }
}

/********** free an skb (as per ATM device driver documentation) **********/

static void amb_kfree_skb (struct sk_buff * skb) {
  if (ATM_SKB(skb)->vcc->pop) {
    ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb);
  } else {
    dev_kfree_skb_any (skb);
  }
}

/********** TX completion **********/

static void tx_complete (amb_dev * dev, tx_out * tx) {
  tx_simple * tx_descr = bus_to_virt (tx->handle);
  struct sk_buff * skb = tx_descr->skb;

  PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);

  // VC layer stats
  atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);

  // free the descriptor
  kfree (tx_descr);

  // free the skb
  amb_kfree_skb (skb);

  dev->stats.tx_ok++;
  return;
}

/********** RX completion **********/

static void rx_complete (amb_dev * dev, rx_out * rx) {
  struct sk_buff * skb = bus_to_virt (rx->handle);
  u16 vc = be16_to_cpu (rx->vc);
  // unused: u16 lec_id = be16_to_cpu (rx->lec_id);
  u16 status = be16_to_cpu (rx->status);
  u16 rx_len = be16_to_cpu (rx->length);

  PRINTD (DBG_FLOW|DBG_RX, "rx_complete %p %p (len=%hu)", dev, rx, rx_len);

  // XXX move this in and add to VC stats ???
  if (!status) {
    struct atm_vcc * atm_vcc = dev->rxer[vc];
    dev->stats.rx.ok++;

    if (atm_vcc) {

      if (rx_len <= atm_vcc->qos.rxtp.max_sdu) {

        if (atm_charge (atm_vcc, skb->truesize)) {

          // prepare socket buffer
          ATM_SKB(skb)->vcc = atm_vcc;
          skb_put (skb, rx_len);

          dump_skb ("<<<", vc, skb);

          // VC layer stats
          atomic_inc(&atm_vcc->stats->rx);
          __net_timestamp(skb);
          // end of our responsibility
          atm_vcc->push (atm_vcc, skb);
          return;

        } else {
          // someone fix this (message), please!
          PRINTD (DBG_INFO|DBG_RX, "dropped thanks to atm_charge (vc %hu, truesize %u)", vc, skb->truesize);
          // drop stats incremented in atm_charge
        }

      } else {
        PRINTK (KERN_INFO, "dropped over-size frame");
        // should we count this?
        atomic_inc(&atm_vcc->stats->rx_drop);
      }

    } else {
      PRINTD (DBG_WARN|DBG_RX, "got frame but RX closed for channel %hu", vc);
      // this is an adapter bug, only in new version of microcode
    }

  } else {
    dev->stats.rx.error++;
    if (status & CRC_ERR)
      dev->stats.rx.badcrc++;
    if (status & LEN_ERR)
      dev->stats.rx.toolong++;
    if (status & ABORT_ERR)
      dev->stats.rx.aborted++;
    if (status & UNUSED_ERR)
      dev->stats.rx.unused++;
  }

  dev_kfree_skb_any (skb);
  return;
}

/*

  Note on queue handling.

  Here "give" and "take" refer to queue entries and a queue (pair)
  rather than frames to or from the host or adapter. Empty frame
  buffers are given to the RX queue pair and returned unused or
  containing RX frames. TX frames (well, pointers to TX fragment
  lists) are given to the TX queue pair, completions are returned.

*/

/********** command queue **********/

// I really don't like this, but it's the best I can do at the moment

// also, the callers are responsible for byte order as the microcode
// sometimes does 16-bit accesses (yuk yuk yuk)

static int command_do (amb_dev * dev, command * cmd) {
  amb_cq * cq = &dev->cq;
  volatile amb_cq_ptrs * ptrs = &cq->ptrs;
  command * my_slot;

  PRINTD (DBG_FLOW|DBG_CMD, "command_do %p", dev);

  if (test_bit (dead, &dev->flags))
    return 0;

  spin_lock (&cq->lock);

  // if not full...
  if (cq->pending < cq->maximum) {
    // remember my slot for later
    my_slot = ptrs->in;
    PRINTD (DBG_CMD, "command in slot %p", my_slot);

    dump_command (cmd);

    // copy command in
    *ptrs->in = *cmd;
    cq->pending++;
    ptrs->in = NEXTQ (ptrs->in, ptrs->start, ptrs->limit);

    // mail the command
    wr_mem (dev, offsetof(amb_mem, mb.adapter.cmd_address), virt_to_bus (ptrs->in));

    if (cq->pending > cq->high)
      cq->high = cq->pending;
    spin_unlock (&cq->lock);

    // these comments were in a while-loop before, msleep removes the loop
    // go to sleep
    // PRINTD (DBG_CMD, "wait: sleeping %lu for command", timeout);
    msleep(cq->pending);

    // wait for my slot to be reached (all waiters are here or above, until...)
    while (ptrs->out != my_slot) {
      PRINTD (DBG_CMD, "wait: command slot (now at %p)", ptrs->out);
      set_current_state(TASK_UNINTERRUPTIBLE);
      schedule();
    }

    // wait on my slot (... one gets to its slot, and... )
    while (ptrs->out->request != cpu_to_be32 (SRB_COMPLETE)) {
      PRINTD (DBG_CMD, "wait: command slot completion");
      set_current_state(TASK_UNINTERRUPTIBLE);
      schedule();
    }

    PRINTD (DBG_CMD, "command complete");
    // update queue (... moves the queue along to the next slot)
    spin_lock (&cq->lock);
    cq->pending--;
    // copy command out
    *cmd = *ptrs->out;
    ptrs->out = NEXTQ (ptrs->out, ptrs->start, ptrs->limit);
    spin_unlock (&cq->lock);

    return 0;
  } else {
    cq->filled++;
    spin_unlock (&cq->lock);
    return -EAGAIN;
  }

}
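
/* A minimal sketch of how a caller drives command_do (the request name
   here is an assumption based on the BIA read mentioned in the theory
   of operation; the retry-on-full pattern is the one drain_rx_pool
   uses below):

     command cmd;
     cmd.request = cpu_to_be32 (SRB_GET_BIA);  // example request
     while (command_do (dev, &cmd))
       schedule();                             // retry while the CQ is full
     // on return, cmd holds the completed slot copied back by command_do
*/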

/********** TX queue pair **********/

static int tx_give (amb_dev * dev, tx_in * tx) {
  amb_txq * txq = &dev->txq;
  unsigned long flags;

  PRINTD (DBG_FLOW|DBG_TX, "tx_give %p", dev);

  if (test_bit (dead, &dev->flags))
    return 0;

  spin_lock_irqsave (&txq->lock, flags);

  if (txq->pending < txq->maximum) {
    PRINTD (DBG_TX, "TX in slot %p", txq->in.ptr);

    *txq->in.ptr = *tx;
    txq->pending++;
    txq->in.ptr = NEXTQ (txq->in.ptr, txq->in.start, txq->in.limit);
    // hand over the TX and ring the bell
    wr_mem (dev, offsetof(amb_mem, mb.adapter.tx_address), virt_to_bus (txq->in.ptr));
    wr_mem (dev, offsetof(amb_mem, doorbell), TX_FRAME);

    if (txq->pending > txq->high)
      txq->high = txq->pending;
    spin_unlock_irqrestore (&txq->lock, flags);
    return 0;
  } else {
    txq->filled++;
    spin_unlock_irqrestore (&txq->lock, flags);
    return -EAGAIN;
  }
}

static int tx_take (amb_dev * dev) {
  amb_txq * txq = &dev->txq;
  unsigned long flags;

  PRINTD (DBG_FLOW|DBG_TX, "tx_take %p", dev);

  spin_lock_irqsave (&txq->lock, flags);

  if (txq->pending && txq->out.ptr->handle) {
    // deal with TX completion
    tx_complete (dev, txq->out.ptr);
    // mark unused again
    txq->out.ptr->handle = 0;
    // remove item
    txq->pending--;
    txq->out.ptr = NEXTQ (txq->out.ptr, txq->out.start, txq->out.limit);

    spin_unlock_irqrestore (&txq->lock, flags);
    return 0;
  } else {

    spin_unlock_irqrestore (&txq->lock, flags);
    return -1;
  }
}

/********** RX queue pairs **********/

static int rx_give (amb_dev * dev, rx_in * rx, unsigned char pool) {
  amb_rxq * rxq = &dev->rxq[pool];
  unsigned long flags;

  PRINTD (DBG_FLOW|DBG_RX, "rx_give %p[%hu]", dev, pool);

  spin_lock_irqsave (&rxq->lock, flags);

  if (rxq->pending < rxq->maximum) {
    PRINTD (DBG_RX, "RX in slot %p", rxq->in.ptr);

    *rxq->in.ptr = *rx;
    rxq->pending++;
    rxq->in.ptr = NEXTQ (rxq->in.ptr, rxq->in.start, rxq->in.limit);
    // hand over the RX buffer
    wr_mem (dev, offsetof(amb_mem, mb.adapter.rx_address[pool]), virt_to_bus (rxq->in.ptr));

    spin_unlock_irqrestore (&rxq->lock, flags);
    return 0;
  } else {
    spin_unlock_irqrestore (&rxq->lock, flags);
    return -1;
  }
}

static int rx_take (amb_dev * dev, unsigned char pool) {
  amb_rxq * rxq = &dev->rxq[pool];
  unsigned long flags;

  PRINTD (DBG_FLOW|DBG_RX, "rx_take %p[%hu]", dev, pool);

  spin_lock_irqsave (&rxq->lock, flags);

  if (rxq->pending && (rxq->out.ptr->status || rxq->out.ptr->length)) {
    // deal with RX completion
    rx_complete (dev, rxq->out.ptr);
    // mark unused again
    rxq->out.ptr->status = 0;
    rxq->out.ptr->length = 0;
    // remove item
    rxq->pending--;
    rxq->out.ptr = NEXTQ (rxq->out.ptr, rxq->out.start, rxq->out.limit);

    if (rxq->pending < rxq->low)
      rxq->low = rxq->pending;
    spin_unlock_irqrestore (&rxq->lock, flags);
    return 0;
  } else {
    if (!rxq->pending && rxq->buffers_wanted)
      rxq->emptied++;
    spin_unlock_irqrestore (&rxq->lock, flags);
    return -1;
  }
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) /********** RX Pool handling **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) /* pre: buffers_wanted = 0, post: pending = 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) static void drain_rx_pool (amb_dev * dev, unsigned char pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) amb_rxq * rxq = &dev->rxq[pool];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) PRINTD (DBG_FLOW|DBG_POOL, "drain_rx_pool %p %hu", dev, pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) if (test_bit (dead, &dev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) /* we are not quite like the fill pool routines as we cannot just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) remove one buffer, we have to remove all of them, but we might as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) well pretend... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) if (rxq->pending > rxq->buffers_wanted) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) command cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) cmd.request = cpu_to_be32 (SRB_FLUSH_BUFFER_Q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) cmd.args.flush.flags = cpu_to_be32 (pool << SRB_POOL_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) while (command_do (dev, &cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) /* the pool may also be emptied via the interrupt handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) while (rxq->pending > rxq->buffers_wanted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) if (rx_take (dev, pool))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) static void drain_rx_pools (amb_dev * dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) unsigned char pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) PRINTD (DBG_FLOW|DBG_POOL, "drain_rx_pools %p", dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) for (pool = 0; pool < NUM_RX_POOLS; ++pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) drain_rx_pool (dev, pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) static void fill_rx_pool (amb_dev * dev, unsigned char pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) gfp_t priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) rx_in rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) amb_rxq * rxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) PRINTD (DBG_FLOW|DBG_POOL, "fill_rx_pool %p %hu %x", dev, pool, priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) if (test_bit (dead, &dev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) rxq = &dev->rxq[pool];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) while (rxq->pending < rxq->maximum && rxq->pending < rxq->buffers_wanted) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) struct sk_buff * skb = alloc_skb (rxq->buffer_size, priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) PRINTD (DBG_SKB|DBG_POOL, "failed to allocate skb for RX pool %hu", pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) if (check_area (skb->data, skb->truesize)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) dev_kfree_skb_any (skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)     // cast needed as there is no printf conversion specifier for pointer differences
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) skb, skb->head, (long) skb_end_offset(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) rx.handle = virt_to_bus (skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) if (rx_give (dev, &rx, pool))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) dev_kfree_skb_any (skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) // top up all RX pools
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) static void fill_rx_pools (amb_dev * dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) unsigned char pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) PRINTD (DBG_FLOW|DBG_POOL, "fill_rx_pools %p", dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) for (pool = 0; pool < NUM_RX_POOLS; ++pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) fill_rx_pool (dev, pool, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) /********** enable host interrupts **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) static void interrupts_on (amb_dev * dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) wr_plain (dev, offsetof(amb_mem, interrupt_control),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) rd_plain (dev, offsetof(amb_mem, interrupt_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) | AMB_INTERRUPT_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) /********** disable host interrupts **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) static void interrupts_off (amb_dev * dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) wr_plain (dev, offsetof(amb_mem, interrupt_control),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) rd_plain (dev, offsetof(amb_mem, interrupt_control))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) &~ AMB_INTERRUPT_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) /********** interrupt handling **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) static irqreturn_t interrupt_handler(int irq, void *dev_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) amb_dev * dev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler: %p", dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) u32 interrupt = rd_plain (dev, offsetof(amb_mem, interrupt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) // for us or someone else sharing the same interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) if (!interrupt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) PRINTD (DBG_IRQ, "irq not for me: %d", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) // definitely for us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) PRINTD (DBG_IRQ, "FYI: interrupt was %08x", interrupt);
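    // writing the asserted bits back (here, all ones) presumably
    // acknowledges and clears them before we start servicing the queues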
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) wr_plain (dev, offsetof(amb_mem, interrupt), -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) unsigned int irq_work = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) unsigned char pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) for (pool = 0; pool < NUM_RX_POOLS; ++pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) while (!rx_take (dev, pool))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) ++irq_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) while (!tx_take (dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) ++irq_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (irq_work) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) fill_rx_pools (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) PRINTD (DBG_IRQ, "work done: %u", irq_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) PRINTD (DBG_IRQ|DBG_WARN, "no work done");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler done: %p", dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /********** make rate (not quite as much fun as Horizon) **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) static int make_rate (unsigned int rate, rounding r,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) u16 * bits, unsigned int * actual) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) unsigned char exp = -1; // hush gcc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) unsigned int man = -1; // hush gcc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) PRINTD (DBG_FLOW|DBG_QOS, "make_rate %u", rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) // rates in cells per second, ITU format (nasty 16-bit floating-point)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) // given 5-bit e and 9-bit m:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) // rate = EITHER (1+m/2^9)*2^e OR 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) // bits = EITHER 1<<14 | e<<9 | m OR 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) // (bit 15 is "reserved", bit 14 "non-zero")
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) // smallest rate is 0 (special representation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) // largest rate is (1+511/512)*2^31 = 4290772992 (< 2^32-1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) // smallest non-zero rate is (1+0/512)*2^0 = 1 (> 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) // simple algorithm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) // find position of top bit, this gives e
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) // remove top bit and shift (rounding if feeling clever) by 9-e
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) // ucode bug: please don't set bit 14! so 0 rate not representable
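  // for illustration, a worked example: rate = 100000 cells/s has its top
  // bit at 2^16, so e = 16; the remainder 100000 - 65536 = 34464 scales to
  // 34464/128 = 269.25, giving m = 269 (round_down, round_nearest) or
  // m = 270 (round_up); with bit 14 left clear (see above) the encoding is
  // (16<<9)|269 = 0x210d, an actual rate of (1+269/512)*2^16 = 99968 cells/s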
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (rate > 0xffc00000U) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) // larger than largest representable rate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (r == round_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) exp = 31;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) man = 511;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) } else if (rate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) // representable rate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) exp = 31;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) man = rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) // invariant: rate = man*2^(exp-31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) while (!(man & (1<<31))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) exp = exp - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) man = man<<1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) // man has top bit set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) // rate = (2^31+(man-2^31))*2^(exp-31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) // rate = (1+(man-2^31)/2^31)*2^exp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) man = man<<1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) man &= 0xffffffffU; // a nop on 32-bit systems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) // rate = (1+man/2^32)*2^exp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) // exp is in the range 0 to 31, man is in the range 0 to 2^32-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) // time to lose significance... we want m in the range 0 to 2^9-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) // rounding presents a minor problem... we first decide which way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) // we are rounding (based on given rounding direction and possibly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) // the bits of the mantissa that are to be discarded).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) switch (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) case round_down: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) // just truncate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) man = man>>(32-9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) case round_up: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) // check all bits that we are discarding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if (man & (~0U>>9)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) man = (man>>(32-9)) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (man == (1<<9)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) // no need to check for round up outside of range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) man = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) exp += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) man = (man>>(32-9));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) case round_nearest: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) // check msb that we are discarding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (man & (1<<(32-9-1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) man = (man>>(32-9)) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (man == (1<<9)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) // no need to check for round up outside of range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) man = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) exp += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) man = (man>>(32-9));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) // zero rate - not representable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (r == round_down) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) exp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) man = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) PRINTD (DBG_QOS, "rate: man=%u, exp=%hu", man, exp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) *bits = /* (1<<14) | */ (exp<<9) | man;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
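  // the rate actually granted is (1 + man/2^9) * 2^exp, computed here in
  // integer arithmetic; for exp < 9 the mantissa contribution is rounded
  // to the nearest integer rather than truncated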
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (actual)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) *actual = (exp >= 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) ? (1 << exp) + (man << (exp-9))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) : (1 << exp) + ((man + (1<<(9-exp-1))) >> (9-exp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) /********** Linux ATM Operations **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) // some are not yet implemented while others do not make sense for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) // this device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) /********** Open a VC **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) static int amb_open (struct atm_vcc * atm_vcc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) struct atm_qos * qos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) struct atm_trafprm * txtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) struct atm_trafprm * rxtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) u16 tx_rate_bits = -1; // hush gcc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) u16 tx_vc_bits = -1; // hush gcc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) u16 tx_frame_bits = -1; // hush gcc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) amb_dev * dev = AMB_DEV(atm_vcc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) amb_vcc * vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) unsigned char pool = -1; // hush gcc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) short vpi = atm_vcc->vpi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) int vci = atm_vcc->vci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) PRINTD (DBG_FLOW|DBG_VCC, "amb_open %x %x", vpi, vci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) #ifdef ATM_VPI_UNSPEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) // UNSPEC is deprecated, remove this code eventually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) PRINTK (KERN_WARNING, "rejecting open with unspecified VPI/VCI (deprecated)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (!(0 <= vpi && vpi < (1<<NUM_VPI_BITS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 0 <= vci && vci < (1<<NUM_VCI_BITS))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) PRINTD (DBG_WARN|DBG_VCC, "VPI/VCI out of range: %hd/%d", vpi, vci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) qos = &atm_vcc->qos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (qos->aal != ATM_AAL5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) PRINTD (DBG_QOS, "AAL not supported");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) // traffic parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) PRINTD (DBG_QOS, "TX:");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) txtp = &qos->txtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if (txtp->traffic_class != ATM_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) switch (txtp->traffic_class) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) case ATM_UBR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) // we take "the PCR" as a rate-cap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) int pcr = atm_pcr_goal (txtp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if (!pcr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) // no rate cap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) tx_rate_bits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) tx_vc_bits = TX_UBR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) tx_frame_bits = TX_FRAME_NOTCAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) rounding r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (pcr < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) r = round_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) pcr = -pcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) r = round_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) error = make_rate (pcr, r, &tx_rate_bits, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) tx_vc_bits = TX_UBR_CAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) tx_frame_bits = TX_FRAME_CAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) case ATM_ABR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) pcr = atm_pcr_goal (txtp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) PRINTD (DBG_QOS, "pcr goal = %d", pcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) default: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) // PRINTD (DBG_QOS, "request for non-UBR/ABR denied");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) PRINTD (DBG_QOS, "request for non-UBR denied");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) PRINTD (DBG_QOS, "tx_rate_bits=%hx, tx_vc_bits=%hx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) tx_rate_bits, tx_vc_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) PRINTD (DBG_QOS, "RX:");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) rxtp = &qos->rxtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) if (rxtp->traffic_class == ATM_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) // do nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) // choose an RX pool (arranged in increasing size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) for (pool = 0; pool < NUM_RX_POOLS; ++pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if ((unsigned int) rxtp->max_sdu <= dev->rxq[pool].buffer_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) PRINTD (DBG_VCC|DBG_QOS|DBG_POOL, "chose pool %hu (max_sdu %u <= %u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) pool, rxtp->max_sdu, dev->rxq[pool].buffer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (pool == NUM_RX_POOLS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) PRINTD (DBG_WARN|DBG_VCC|DBG_QOS|DBG_POOL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) "no pool suitable for VC (RX max_sdu %d is too large)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) rxtp->max_sdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) switch (rxtp->traffic_class) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) case ATM_UBR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) case ATM_ABR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) pcr = atm_pcr_goal (rxtp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) PRINTD (DBG_QOS, "pcr goal = %d", pcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) default: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) // PRINTD (DBG_QOS, "request for non-UBR/ABR denied");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) PRINTD (DBG_QOS, "request for non-UBR denied");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) // get space for our vcc stuff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) vcc = kmalloc (sizeof(amb_vcc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (!vcc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) PRINTK (KERN_ERR, "out of memory!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) atm_vcc->dev_data = (void *) vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) // no failures beyond this point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) // we are not really "immediately before allocating the connection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) // identifier in hardware", but it will just have to do!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) set_bit(ATM_VF_ADDR,&atm_vcc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (txtp->traffic_class != ATM_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) command cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) vcc->tx_frame_bits = tx_frame_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) mutex_lock(&dev->vcc_sf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) if (dev->rxer[vci]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) // RXer on the channel already, just modify rate...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) cmd.args.modify_rate.vc = cpu_to_be32 (vci); // vpi 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) cmd.args.modify_rate.rate = cpu_to_be32 (tx_rate_bits << SRB_RATE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) while (command_do (dev, &cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) // ... and TX flags, preserving the RX pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) cmd.args.modify_flags.vc = cpu_to_be32 (vci); // vpi 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) cmd.args.modify_flags.flags = cpu_to_be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) ( (AMB_VCC(dev->rxer[vci])->rx_info.pool << SRB_POOL_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) | (tx_vc_bits << SRB_FLAGS_SHIFT) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) while (command_do (dev, &cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) // no RXer on the channel, just open (with pool zero)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) cmd.request = cpu_to_be32 (SRB_OPEN_VC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) cmd.args.open.vc = cpu_to_be32 (vci); // vpi 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) cmd.args.open.flags = cpu_to_be32 (tx_vc_bits << SRB_FLAGS_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) cmd.args.open.rate = cpu_to_be32 (tx_rate_bits << SRB_RATE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) while (command_do (dev, &cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) dev->txer[vci].tx_present = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) mutex_unlock(&dev->vcc_sf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if (rxtp->traffic_class != ATM_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) command cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) vcc->rx_info.pool = pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) mutex_lock(&dev->vcc_sf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) /* grow RX buffer pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (!dev->rxq[pool].buffers_wanted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) dev->rxq[pool].buffers_wanted = rx_lats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) dev->rxq[pool].buffers_wanted += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) fill_rx_pool (dev, pool, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (dev->txer[vci].tx_present) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) // TXer on the channel already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) // switch (from pool zero) to this pool, preserving the TX bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) cmd.args.modify_flags.vc = cpu_to_be32 (vci); // vpi 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) cmd.args.modify_flags.flags = cpu_to_be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) ( (pool << SRB_POOL_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) | (dev->txer[vci].tx_vc_bits << SRB_FLAGS_SHIFT) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) // no TXer on the channel, open the VC (with no rate info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) cmd.request = cpu_to_be32 (SRB_OPEN_VC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) cmd.args.open.vc = cpu_to_be32 (vci); // vpi 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) cmd.args.open.flags = cpu_to_be32 (pool << SRB_POOL_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) cmd.args.open.rate = cpu_to_be32 (0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) while (command_do (dev, &cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) // this link allows RX frames through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) dev->rxer[vci] = atm_vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) mutex_unlock(&dev->vcc_sf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) // indicate readiness
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) set_bit(ATM_VF_READY,&atm_vcc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) /********** Close a VC **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) static void amb_close (struct atm_vcc * atm_vcc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) amb_dev * dev = AMB_DEV (atm_vcc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) amb_vcc * vcc = AMB_VCC (atm_vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) u16 vci = atm_vcc->vci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) PRINTD (DBG_VCC|DBG_FLOW, "amb_close");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) // indicate unreadiness
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) clear_bit(ATM_VF_READY,&atm_vcc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) // disable TXing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (atm_vcc->qos.txtp.traffic_class != ATM_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) command cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) mutex_lock(&dev->vcc_sf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (dev->rxer[vci]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) // RXer still on the channel, just modify rate... XXX not really needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) cmd.args.modify_rate.vc = cpu_to_be32 (vci); // vpi 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) cmd.args.modify_rate.rate = cpu_to_be32 (0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) // ... and clear TX rate flags (XXX to stop RM cell output?), preserving RX pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) // no RXer on the channel, close channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) cmd.request = cpu_to_be32 (SRB_CLOSE_VC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) cmd.args.close.vc = cpu_to_be32 (vci); // vpi 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) dev->txer[vci].tx_present = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) while (command_do (dev, &cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) mutex_unlock(&dev->vcc_sf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) // disable RXing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) command cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) // this is (the?) one reason why we need the amb_vcc struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) unsigned char pool = vcc->rx_info.pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) mutex_lock(&dev->vcc_sf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) if (dev->txer[vci].tx_present) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) // TXer still on the channel, just go to pool zero XXX not really needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) cmd.args.modify_flags.vc = cpu_to_be32 (vci); // vpi 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) cmd.args.modify_flags.flags = cpu_to_be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) (dev->txer[vci].tx_vc_bits << SRB_FLAGS_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) // no TXer on the channel, close the VC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) cmd.request = cpu_to_be32 (SRB_CLOSE_VC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) cmd.args.close.vc = cpu_to_be32 (vci); // vpi 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) // forget the rxer - no more skbs will be pushed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (atm_vcc != dev->rxer[vci])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) PRINTK (KERN_ERR, "%s vcc=%p rxer[vci]=%p",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) "arghhh! we're going to die!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) vcc, dev->rxer[vci]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) dev->rxer[vci] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) while (command_do (dev, &cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) /* shrink RX buffer pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) dev->rxq[pool].buffers_wanted -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (dev->rxq[pool].buffers_wanted == rx_lats) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) dev->rxq[pool].buffers_wanted = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) drain_rx_pool (dev, pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) mutex_unlock(&dev->vcc_sf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) // free our structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) kfree (vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) // say the VPI/VCI is free again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) clear_bit(ATM_VF_ADDR,&atm_vcc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) /********** Send **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) amb_dev * dev = AMB_DEV(atm_vcc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) amb_vcc * vcc = AMB_VCC(atm_vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) u16 vc = atm_vcc->vci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) unsigned int tx_len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) unsigned char * tx_data = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) tx_simple * tx_descr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) tx_in tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (test_bit (dead, &dev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) PRINTD (DBG_FLOW|DBG_TX, "amb_send vc %x data %p len %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) vc, tx_data, tx_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) dump_skb (">>>", vc, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (!dev->txer[vc].tx_present) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) PRINTK (KERN_ERR, "attempt to send on RX-only VC %x", vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) return -EBADFD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) // this is a driver private field so we have to set it ourselves,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) // despite the fact that we are _required_ to use it to check for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) // pop function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) ATM_SKB(skb)->vcc = atm_vcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if (skb->len > (size_t) atm_vcc->qos.txtp.max_sdu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) PRINTK (KERN_ERR, "sk_buff length greater than agreed max_sdu, dropping...");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) if (check_area (skb->data, skb->len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) atomic_inc(&atm_vcc->stats->tx_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) return -ENOMEM; // ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) // allocate memory for fragments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) tx_descr = kmalloc (sizeof(tx_simple), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if (!tx_descr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) PRINTK (KERN_ERR, "could not allocate TX descriptor");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (check_area (tx_descr, sizeof(tx_simple))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) kfree (tx_descr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) PRINTD (DBG_TX, "fragment list allocated at %p", tx_descr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
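  // build a single-fragment descriptor: one buffer entry covers the whole
  // skb, and the end marker carries a handle back to this descriptor so
  // the TX completion path can recover it (and the skb) later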
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) tx_descr->skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) tx_descr->tx_frag.bytes = cpu_to_be32 (tx_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) tx_descr->tx_frag.address = cpu_to_be32 (virt_to_bus (tx_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) tx_descr->tx_frag_end.handle = virt_to_bus (tx_descr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) tx_descr->tx_frag_end.vc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) tx_descr->tx_frag_end.next_descriptor_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) tx_descr->tx_frag_end.next_descriptor = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) #ifdef AMB_NEW_MICROCODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) tx_descr->tx_frag_end.cpcs_uu = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) tx_descr->tx_frag_end.cpi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) tx_descr->tx_frag_end.pad = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) tx.vc = cpu_to_be16 (vcc->tx_frame_bits | vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) tx.tx_descr_length = cpu_to_be16 (sizeof(tx_frag)+sizeof(tx_frag_end));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) tx.tx_descr_addr = cpu_to_be32 (virt_to_bus (&tx_descr->tx_frag));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) while (tx_give (dev, &tx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) /********** Change QoS on a VC **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) // int amb_change_qos (struct atm_vcc * atm_vcc, struct atm_qos * qos, int flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) /********** Free RX Socket Buffer **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) static void amb_free_rx_skb (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) amb_dev * dev = AMB_DEV (atm_vcc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) amb_vcc * vcc = AMB_VCC (atm_vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) unsigned char pool = vcc->rx_info.pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) rx_in rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) // This may be unsafe for various reasons that I cannot really guess
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) // at. However, I note that the ATM layer calls kfree_skb rather
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)   // than dev_kfree_skb at this point so we are at least covered as far
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) // as buffer locking goes. There may be bugs if pcap clones RX skbs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) PRINTD (DBG_FLOW|DBG_SKB, "amb_rx_free skb %p (atm_vcc %p, vcc %p)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) skb, atm_vcc, vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) rx.handle = virt_to_bus (skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) skb->data = skb->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) skb_reset_tail_pointer(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) skb->len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if (!rx_give (dev, &rx, pool)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) // success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) PRINTD (DBG_SKB|DBG_POOL, "recycled skb for pool %hu", pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) // just do what the ATM layer would have done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) dev_kfree_skb_any (skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) /********** Proc File Output **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) static int amb_proc_read (struct atm_dev * atm_dev, loff_t * pos, char * page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) amb_dev * dev = AMB_DEV (atm_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) int left = *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) unsigned char pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) PRINTD (DBG_FLOW, "amb_proc_read");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) /* more diagnostics here? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
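  // each call renders at most one line of output, selected by *pos;
  // returning 0 after the last line marks the end of the proc file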
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) if (!left--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) amb_stats * s = &dev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) return sprintf (page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) "frames: TX OK %lu, RX OK %lu, RX bad %lu "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) "(CRC %lu, long %lu, aborted %lu, unused %lu).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) s->tx_ok, s->rx.ok, s->rx.error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) s->rx.badcrc, s->rx.toolong,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) s->rx.aborted, s->rx.unused);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (!left--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) amb_cq * c = &dev->cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) return sprintf (page, "cmd queue [cur/hi/max]: %u/%u/%u. ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) c->pending, c->high, c->maximum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (!left--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) amb_txq * t = &dev->txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) return sprintf (page, "TX queue [cur/max high full]: %u/%u %u %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) t->pending, t->maximum, t->high, t->filled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) if (!left--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) unsigned int count = sprintf (page, "RX queues [cur/max/req low empty]:");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) amb_rxq * r = &dev->rxq[pool];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) count += sprintf (page+count, " %u/%u/%u %u %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) r->pending, r->maximum, r->buffers_wanted, r->low, r->emptied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) count += sprintf (page+count, ".\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (!left--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) unsigned int count = sprintf (page, "RX buffer sizes:");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) amb_rxq * r = &dev->rxq[pool];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) count += sprintf (page+count, " %u", r->buffer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) count += sprintf (page+count, ".\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) if (!left--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) // suni block etc?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) /********** Operation Structure **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) static const struct atmdev_ops amb_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) .open = amb_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) .close = amb_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) .send = amb_send,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) .proc_read = amb_proc_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) /********** housekeeping **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) static void do_housekeeping (struct timer_list *t) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) amb_dev * dev = from_timer(dev, t, housekeeping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) // could collect device-specific (not driver/atm-linux) stats here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) // last resort refill once every ten seconds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) fill_rx_pools (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) mod_timer(&dev->housekeeping, jiffies + 10*HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) /********** creation of communication queues **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) static int create_queues(amb_dev *dev, unsigned int cmds, unsigned int txs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) unsigned int *rxs, unsigned int *rx_buffer_sizes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) unsigned char pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) size_t total = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) void * memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) void * limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) PRINTD (DBG_FLOW, "create_queues %p", dev);
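  /* all the queues live in a single contiguous allocation, carved up in
     order: the command queue, the TX in/out queue pair, then an RX in/out
     queue pair for each buffer pool */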
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) total += cmds * sizeof(command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) total += txs * (sizeof(tx_in) + sizeof(tx_out));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) for (pool = 0; pool < NUM_RX_POOLS; ++pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) total += rxs[pool] * (sizeof(rx_in) + sizeof(rx_out));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) memory = kmalloc (total, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (!memory) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) PRINTK (KERN_ERR, "could not allocate queues");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if (check_area (memory, total)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) PRINTK (KERN_ERR, "queues allocated in nasty area");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) kfree (memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) limit = memory + total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) PRINTD (DBG_INIT, "queues from %p to %p", memory, limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) PRINTD (DBG_CMD, "command queue at %p", memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) command * cmd = memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) amb_cq * cq = &dev->cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) cq->pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) cq->high = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) cq->maximum = cmds - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) cq->ptrs.start = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) cq->ptrs.in = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) cq->ptrs.out = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) cq->ptrs.limit = cmd + cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) memory = cq->ptrs.limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) PRINTD (DBG_TX, "TX queue pair at %p", memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) tx_in * in = memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) tx_out * out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) amb_txq * txq = &dev->txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) txq->pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) txq->high = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) txq->filled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) txq->maximum = txs - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) txq->in.start = in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) txq->in.ptr = in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) txq->in.limit = in + txs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) memory = txq->in.limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) out = memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) txq->out.start = out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) txq->out.ptr = out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) txq->out.limit = out + txs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) memory = txq->out.limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) PRINTD (DBG_RX, "RX queue pairs at %p", memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) rx_in * in = memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) rx_out * out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) amb_rxq * rxq = &dev->rxq[pool];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) rxq->buffer_size = rx_buffer_sizes[pool];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) rxq->buffers_wanted = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) rxq->pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) rxq->low = rxs[pool] - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) rxq->emptied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) rxq->maximum = rxs[pool] - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) rxq->in.start = in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) rxq->in.ptr = in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) rxq->in.limit = in + rxs[pool];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) memory = rxq->in.limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) out = memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) rxq->out.start = out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) rxq->out.ptr = out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) rxq->out.limit = out + rxs[pool];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) memory = rxq->out.limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) if (memory == limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) PRINTK (KERN_ERR, "bad queue alloc %p != %p (tell maintainer)", memory, limit);
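// limit - total points back at the base of the original allocation (limit was set to memory + total above)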
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) kfree (limit - total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) /********** destruction of communication queues **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) static void destroy_queues (amb_dev * dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) // all queues assumed empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) void * memory = dev->cq.ptrs.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) // includes txq.in, txq.out, rxq[].in and rxq[].out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) PRINTD (DBG_FLOW, "destroy_queues %p", dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) PRINTD (DBG_INIT, "freeing queues at %p", memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) kfree (memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) /********** basic loader commands and error handling **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) // centisecond timeouts - guessing away here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) static unsigned int command_timeouts [] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) [host_memory_test] = 15,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) [read_adapter_memory] = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) [write_adapter_memory] = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) [adapter_start] = 50,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) [get_version_number] = 10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) [interrupt_host] = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) [flash_erase_sector] = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) [adap_download_block] = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) [adap_erase_flash] = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) [adap_run_in_iram] = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) [adap_end_download] = 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) static unsigned int command_successes [] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) [host_memory_test] = COMMAND_PASSED_TEST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) [read_adapter_memory] = COMMAND_READ_DATA_OK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) [write_adapter_memory] = COMMAND_WRITE_DATA_OK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) [adapter_start] = COMMAND_COMPLETE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) [get_version_number] = COMMAND_COMPLETE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) [interrupt_host] = COMMAND_COMPLETE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) [flash_erase_sector] = COMMAND_COMPLETE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) [adap_download_block] = COMMAND_COMPLETE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) [adap_erase_flash] = COMMAND_COMPLETE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) [adap_run_in_iram] = COMMAND_COMPLETE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) [adap_end_download] = COMMAND_COMPLETE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) static int decode_loader_result (loader_command cmd, u32 result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) const char *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) if (result == command_successes[cmd])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) switch (result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) case BAD_COMMAND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) res = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) msg = "bad command";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) case COMMAND_IN_PROGRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) res = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) msg = "command in progress";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) case COMMAND_PASSED_TEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) msg = "command passed test";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) case COMMAND_FAILED_TEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) res = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) msg = "command failed test";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) case COMMAND_READ_DATA_OK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) msg = "command read data ok";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) case COMMAND_READ_BAD_ADDRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) res = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) msg = "command read bad address";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) case COMMAND_WRITE_DATA_OK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) msg = "command write data ok";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) case COMMAND_WRITE_BAD_ADDRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) res = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) msg = "command write bad address";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) case COMMAND_WRITE_FLASH_FAILURE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) res = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) msg = "command write flash failure";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) case COMMAND_COMPLETE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) msg = "command complete";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) case COMMAND_FLASH_ERASE_FAILURE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) res = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) msg = "command flash erase failure";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) case COMMAND_WRITE_BAD_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) res = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) msg = "command write bad data";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) res = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) msg = "unknown error";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) PRINTD (DBG_LOAD|DBG_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) "decode_loader_result got %u=%x !",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) result, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) PRINTK (KERN_ERR, "%s", msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) static int do_loader_command(volatile loader_block *lb, const amb_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) loader_command cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) PRINTD (DBG_FLOW|DBG_LOAD, "do_loader_command");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) /* do a command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) Set the return value to zero, set the command type and set the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) valid entry to the right magic value. The payload is already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) correctly byte-ordered so we leave it alone. Hit the doorbell
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) with the bus address of this structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) lb->result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) lb->command = cpu_to_be32 (cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) lb->valid = cpu_to_be32 (DMA_VALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) // dump_registers (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) // dump_loader_block (lb);
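// hand the loader block's bus address to the adapter via the doorbell; the onegigmask arithmetic matches the 1Gb PLX window aimed at this block by fixup_plx_window()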
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) wr_mem (dev, offsetof(amb_mem, doorbell), virt_to_bus (lb) & ~onegigmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) timeout = command_timeouts[cmd] * 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
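// poll for a result: msleep_interruptible() returns the milliseconds left unslept, so timeout eventually drops to zero and the else branch declares a timeout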
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) while (!lb->result || lb->result == cpu_to_be32 (COMMAND_IN_PROGRESS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) if (timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) timeout = msleep_interruptible(timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) PRINTD (DBG_LOAD|DBG_ERR, "command %d timed out", cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) dump_registers (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) dump_loader_block (lb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) if (cmd == adapter_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) // wait for start command to acknowledge...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) timeout = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) while (rd_plain (dev, offsetof(amb_mem, doorbell)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) if (timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) timeout = msleep_interruptible(timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) PRINTD (DBG_LOAD|DBG_ERR, "start command did not clear doorbell, res=%08x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) be32_to_cpu (lb->result));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) dump_registers (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) return decode_loader_result (cmd, be32_to_cpu (lb->result));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) /* loader: determine loader version */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) static int get_loader_version(loader_block *lb, const amb_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) u32 *version)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) PRINTD (DBG_FLOW|DBG_LOAD, "get_loader_version");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) res = do_loader_command (lb, dev, get_version_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) if (version)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) *version = be32_to_cpu (lb->payload.version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) /* loader: write memory data blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) static int loader_write(loader_block *lb, const amb_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) const struct ihex_binrec *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) transfer_block * tb = &lb->payload.transfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) PRINTD (DBG_FLOW|DBG_LOAD, "loader_write");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) tb->address = rec->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) tb->count = cpu_to_be32(be16_to_cpu(rec->len) / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) memcpy(tb->data, rec->data, be16_to_cpu(rec->len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) return do_loader_command (lb, dev, write_adapter_memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) /* loader: verify memory data blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) static int loader_verify(loader_block *lb, const amb_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) const struct ihex_binrec *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) transfer_block * tb = &lb->payload.transfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) PRINTD (DBG_FLOW|DBG_LOAD, "loader_verify");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) tb->address = rec->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) tb->count = cpu_to_be32(be16_to_cpu(rec->len) / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) res = do_loader_command (lb, dev, read_adapter_memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) if (!res && memcmp(tb->data, rec->data, be16_to_cpu(rec->len)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) res = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) /* loader: start microcode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) static int loader_start(loader_block *lb, const amb_dev *dev, u32 address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) PRINTD (DBG_FLOW|DBG_LOAD, "loader_start");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) lb->payload.start = cpu_to_be32 (address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) return do_loader_command (lb, dev, adapter_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) /********** reset card **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) static inline void sf (const char * msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) PRINTK (KERN_ERR, "self-test failed: %s", msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) static int amb_reset (amb_dev * dev, int diags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) u32 word;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) PRINTD (DBG_FLOW|DBG_LOAD, "amb_reset");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) word = rd_plain (dev, offsetof(amb_mem, reset_control));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) // put card into reset state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) wr_plain (dev, offsetof(amb_mem, reset_control), word | AMB_RESET_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) // wait a short while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) udelay (10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) // put card into known good state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) wr_plain (dev, offsetof(amb_mem, interrupt_control), AMB_DOORBELL_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) // clear all interrupts just in case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) wr_plain (dev, offsetof(amb_mem, interrupt), -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) // clear self-test done flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) wr_plain (dev, offsetof(amb_mem, mb.loader.ready), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) // take card out of reset state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) wr_plain (dev, offsetof(amb_mem, reset_control), word & ~AMB_RESET_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) if (diags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) // 4.2 second wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) msleep(4200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) // half second time-out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) timeout = 500;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) while (!rd_plain (dev, offsetof(amb_mem, mb.loader.ready)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) if (timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) timeout = msleep_interruptible(timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) PRINTD (DBG_LOAD|DBG_ERR, "reset timed out");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) // get results of self-test
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) // XXX double check byte-order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) word = rd_mem (dev, offsetof(amb_mem, mb.loader.result));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) if (word & SELF_TEST_FAILURE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) if (word & GPINT_TST_FAILURE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) sf ("interrupt");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) if (word & SUNI_DATA_PATTERN_FAILURE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) sf ("SUNI data pattern");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) if (word & SUNI_DATA_BITS_FAILURE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) sf ("SUNI data bits");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) if (word & SUNI_UTOPIA_FAILURE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) sf ("SUNI UTOPIA interface");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) if (word & SUNI_FIFO_FAILURE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) sf ("SUNI cell buffer FIFO");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if (word & SRAM_FAILURE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) sf ("bad SRAM");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) // better return value?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) /********** transfer and start the microcode **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) static int ucode_init(loader_block *lb, amb_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) const struct firmware *fw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) unsigned long start_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) const struct ihex_binrec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) const char *errmsg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) res = request_ihex_firmware(&fw, "atmsar11.fw", &dev->pci_dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) PRINTK (KERN_ERR, "Cannot load microcode data");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) /* First record contains just the start address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) rec = (const struct ihex_binrec *)fw->data;
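// i.e. exactly one 32-bit word, to be "loaded" at address zero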
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) if (be16_to_cpu(rec->len) != sizeof(__be32) || be32_to_cpu(rec->addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) errmsg = "no start record";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) start_address = be32_to_cpup((__be32 *)rec->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) rec = ihex_next_binrec(rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) PRINTD (DBG_FLOW|DBG_LOAD, "ucode_init");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) while (rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) PRINTD (DBG_LOAD, "starting region (%x, %u)", be32_to_cpu(rec->addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) be16_to_cpu(rec->len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) if (be16_to_cpu(rec->len) > 4 * MAX_TRANSFER_DATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) errmsg = "record too long";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) if (be16_to_cpu(rec->len) & 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) errmsg = "length not a multiple of 4 bytes";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) res = loader_write(lb, dev, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) res = loader_verify(lb, dev, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) rec = ihex_next_binrec(rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) release_firmware(fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) if (!res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) res = loader_start(lb, dev, start_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) release_firmware(fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) PRINTK(KERN_ERR, "Bad microcode data (%s)", errmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) /********** give adapter parameters **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) static inline __be32 bus_addr(void * addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) return cpu_to_be32 (virt_to_bus (addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) static int amb_talk(amb_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) adap_talk_block a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) unsigned char pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) PRINTD (DBG_FLOW, "amb_talk %p", dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) a.command_start = bus_addr (dev->cq.ptrs.start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) a.command_end = bus_addr (dev->cq.ptrs.limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) a.tx_start = bus_addr (dev->txq.in.start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) a.tx_end = bus_addr (dev->txq.in.limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) a.txcom_start = bus_addr (dev->txq.out.start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) a.txcom_end = bus_addr (dev->txq.out.limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) // the other "a" items are set up by the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) a.rec_struct[pool].buffer_start = bus_addr (dev->rxq[pool].in.start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) a.rec_struct[pool].buffer_end = bus_addr (dev->rxq[pool].in.limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) a.rec_struct[pool].rx_start = bus_addr (dev->rxq[pool].out.start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) a.rec_struct[pool].rx_end = bus_addr (dev->rxq[pool].out.limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) a.rec_struct[pool].buffer_size = cpu_to_be32 (dev->rxq[pool].buffer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) #ifdef AMB_NEW_MICROCODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) // disable fast PLX prefetching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) a.init_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) // pass the structure
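// the adapter fetches the adap_talk_block from host memory at this bus address; the block lives on our stack, so do not return before the doorbell clears below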
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) wr_mem (dev, offsetof(amb_mem, doorbell), virt_to_bus (&a));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) // 2.2 second wait (must not touch doorbell during 2 second DMA test)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) msleep(2200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) // give the adapter another half second?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) timeout = 500;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) while (rd_plain (dev, offsetof(amb_mem, doorbell)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) if (timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) timeout = msleep_interruptible(timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) PRINTD (DBG_INIT|DBG_ERR, "adapter init timed out");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) // get microcode version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) static void amb_ucode_version(amb_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) u32 major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) u32 minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) command cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) cmd.request = cpu_to_be32 (SRB_GET_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) while (command_do (dev, &cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) set_current_state(TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) major = be32_to_cpu (cmd.args.version.major);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) minor = be32_to_cpu (cmd.args.version.minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) PRINTK (KERN_INFO, "microcode version is %u.%u", major, minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) // get end station address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) static void amb_esi(amb_dev *dev, u8 *esi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) u32 lower4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) u16 upper2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) command cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) cmd.request = cpu_to_be32 (SRB_GET_BIA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) while (command_do (dev, &cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) set_current_state(TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) lower4 = be32_to_cpu (cmd.args.bia.lower4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) upper2 = be32_to_cpu (cmd.args.bia.upper2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) PRINTD (DBG_LOAD, "BIA: lower4: %08x, upper2: %04x", lower4, upper2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) if (esi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) PRINTDB (DBG_INIT, "ESI:");
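// each byte of the BIA arrives bit-reversed, hence the bitrev8() when building the ESI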
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) for (i = 0; i < ESI_LEN; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) if (i < 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) esi[i] = bitrev8(lower4>>(8*i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) esi[i] = bitrev8(upper2>>(8*(i-4)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) PRINTDM (DBG_INIT, " %02x", esi[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) PRINTDE (DBG_INIT, "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) static void fixup_plx_window (amb_dev *dev, loader_block *lb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) // fix up the PLX-mapped window base address to match the block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) unsigned long blb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) u32 mapreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) blb = virt_to_bus(lb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) // the kernel stack had better not ever cross a 1Gb boundary!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) mapreg = rd_plain (dev, offsetof(amb_mem, stuff[10]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) mapreg &= ~onegigmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) mapreg |= blb & onegigmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) wr_plain (dev, offsetof(amb_mem, stuff[10]), mapreg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) static int amb_init(amb_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) loader_block lb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) u32 version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) if (amb_reset (dev, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) PRINTK (KERN_ERR, "card reset failed!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) fixup_plx_window (dev, &lb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) if (get_loader_version (&lb, dev, &version)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) PRINTK (KERN_INFO, "failed to get loader version");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) PRINTK (KERN_INFO, "loader version is %08x", version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) if (ucode_init (&lb, dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) PRINTK (KERN_ERR, "microcode failure");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) } else if (create_queues (dev, cmds, txs, rxs, rxs_bs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) PRINTK (KERN_ERR, "failed to get memory for queues");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) if (amb_talk (dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) PRINTK (KERN_ERR, "adapter did not accept queues");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) amb_ucode_version (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) } /* amb_talk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) destroy_queues (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) } /* create_queues, ucode_init */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) amb_reset (dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) } /* get_loader_version */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) } /* amb_reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) static void setup_dev(amb_dev *dev, struct pci_dev *pci_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) unsigned char pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) // set up known dev items straight away
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) dev->pci_dev = pci_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) pci_set_drvdata(pci_dev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) dev->iobase = pci_resource_start (pci_dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) dev->irq = pci_dev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) dev->membase = bus_to_virt(pci_resource_start(pci_dev, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) // flags (currently only dead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) dev->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) // Allocate cell rates (fibre)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) // ATM_OC3_PCR = 155520000/8/270*260/53 - 29/53
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) // to be really pedantic, this should be ATM_OC3c_PCR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) dev->tx_avail = ATM_OC3_PCR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) dev->rx_avail = ATM_OC3_PCR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) // semaphore for txer/rxer modifications - we cannot use a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) // spinlock as the critical region needs to switch processes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) mutex_init(&dev->vcc_sf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) // queue manipulation spinlocks; we want atomic reads and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) // writes to the queue descriptors (handles IRQ and SMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) // consider replacing "int pending" -> "atomic_t available"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) // => problem related to who gets to move queue pointers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) spin_lock_init (&dev->cq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) spin_lock_init (&dev->txq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) for (pool = 0; pool < NUM_RX_POOLS; ++pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) spin_lock_init (&dev->rxq[pool].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) static void setup_pci_dev(struct pci_dev *pci_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) unsigned char lat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) // enable bus master accesses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) pci_set_master(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) // frobnicate latency (upwards, usually)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) pci_read_config_byte (pci_dev, PCI_LATENCY_TIMER, &lat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
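// if no pci_lat module parameter was given, keep the existing latency but enforce at least MIN_PCI_LATENCY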
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) if (!pci_lat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) pci_lat = (lat < MIN_PCI_LATENCY) ? MIN_PCI_LATENCY : lat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) if (lat != pci_lat) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) PRINTK (KERN_INFO, "Changing PCI latency timer from %hu to %hu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) lat, pci_lat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, pci_lat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) static int amb_probe(struct pci_dev *pci_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) const struct pci_device_id *pci_ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) amb_dev * dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) unsigned int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) err = pci_enable_device(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) PRINTK (KERN_ERR, "cannot enable PCI device");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) // read resources from PCI configuration space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) irq = pci_dev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) if (pci_dev->device == PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) PRINTK (KERN_ERR, "skipped broken (PLX rev 2) card");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) goto out_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) PRINTD (DBG_INFO, "found Madge ATM adapter (amb) at"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) " IO %llx, IRQ %u, MEM %p",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) (unsigned long long)pci_resource_start(pci_dev, 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) irq, bus_to_virt(pci_resource_start(pci_dev, 0)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) // check IO region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) err = pci_request_region(pci_dev, 1, DEV_LABEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) PRINTK (KERN_ERR, "IO range already in use!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) goto out_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) dev = kzalloc(sizeof(amb_dev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) PRINTK (KERN_ERR, "out of memory!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) goto out_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) setup_dev(dev, pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) err = amb_init(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) PRINTK (KERN_ERR, "adapter initialisation failure");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) setup_pci_dev(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) // grab (but share) IRQ and install handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) err = request_irq(irq, interrupt_handler, IRQF_SHARED, DEV_LABEL, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) PRINTK (KERN_ERR, "request IRQ failed!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) goto out_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) dev->atm_dev = atm_dev_register (DEV_LABEL, &pci_dev->dev, &amb_ops, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) if (!dev->atm_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) PRINTD (DBG_ERR, "failed to register Madge ATM adapter");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) goto out_free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) PRINTD (DBG_INFO, "registered Madge ATM adapter (no. %d) (%p) at %p",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) dev->atm_dev->number, dev, dev->atm_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) dev->atm_dev->dev_data = (void *) dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) // register our address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) amb_esi (dev, dev->atm_dev->esi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) // 0 bits for vpi, 10 bits for vci
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) dev->atm_dev->ci_range.vpi_bits = NUM_VPI_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) dev->atm_dev->ci_range.vci_bits = NUM_VCI_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) timer_setup(&dev->housekeeping, do_housekeeping, 0);
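// fire housekeeping immediately; the handler is expected to re-arm the timer itself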
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) mod_timer(&dev->housekeeping, jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) // enable host interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) interrupts_on (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) out_free_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) free_irq(irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) out_reset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) amb_reset(dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) kfree(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) out_release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) pci_release_region(pci_dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) out_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) pci_disable_device(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) static void amb_remove_one(struct pci_dev *pci_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) struct amb_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) dev = pci_get_drvdata(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) PRINTD(DBG_INFO|DBG_INIT, "closing %p (atm_dev = %p)", dev, dev->atm_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) del_timer_sync(&dev->housekeeping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) // the drain should not be necessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) drain_rx_pools(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) interrupts_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) amb_reset(dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) pci_disable_device(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) destroy_queues(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) atm_dev_deregister(dev->atm_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) kfree(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) pci_release_region(pci_dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) static void __init amb_check_args (void) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) unsigned char pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) unsigned int max_rx_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) #ifdef DEBUG_AMBASSADOR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) PRINTK (KERN_NOTICE, "debug bitmap is %hx", debug &= DBG_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) if (debug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) PRINTK (KERN_NOTICE, "no debugging support");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)
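// note: the assignments inside the PRINTK argument lists below clamp the offending parameters as a side effect of reporting them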
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) if (cmds < MIN_QUEUE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) PRINTK (KERN_NOTICE, "cmds has been raised to %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) cmds = MIN_QUEUE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) if (txs < MIN_QUEUE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) PRINTK (KERN_NOTICE, "txs has been raised to %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) txs = MIN_QUEUE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) for (pool = 0; pool < NUM_RX_POOLS; ++pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) if (rxs[pool] < MIN_QUEUE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) PRINTK (KERN_NOTICE, "rxs[%hu] has been raised to %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) pool, rxs[pool] = MIN_QUEUE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) // buffer sizes should be greater than zero and strictly increasing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) max_rx_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) for (pool = 0; pool < NUM_RX_POOLS; ++pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) if (rxs_bs[pool] <= max_rx_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) PRINTK (KERN_NOTICE, "useless pool (rxs_bs[%hu] = %u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) pool, rxs_bs[pool]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) max_rx_size = rxs_bs[pool];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) if (rx_lats < MIN_RX_BUFFERS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) PRINTK (KERN_NOTICE, "rx_lats has been raised to %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) rx_lats = MIN_RX_BUFFERS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) /********** module stuff **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) MODULE_AUTHOR(maintainer_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) MODULE_DESCRIPTION(description_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) MODULE_FIRMWARE("atmsar11.fw");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) module_param(debug, ushort, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) module_param(cmds, uint, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) module_param(txs, uint, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) module_param_array(rxs, uint, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) module_param_array(rxs_bs, uint, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) module_param(rx_lats, uint, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) module_param(pci_lat, byte, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) MODULE_PARM_DESC(debug, "debug bitmap, see .h file");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) MODULE_PARM_DESC(cmds, "number of command queue entries");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) MODULE_PARM_DESC(txs, "number of TX queue entries");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) MODULE_PARM_DESC(rxs, "number of RX queue entries [" __MODULE_STRING(NUM_RX_POOLS) "]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) MODULE_PARM_DESC(rxs_bs, "size of RX buffers [" __MODULE_STRING(NUM_RX_POOLS) "]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) MODULE_PARM_DESC(rx_lats, "number of extra buffers to cope with RX latencies");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles");
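// usage sketch (illustrative values only): modprobe ambassador debug=0x20 cmds=16 txs=32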
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) /********** module entry **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) static const struct pci_device_id amb_pci_tbl[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) { PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR), 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) { PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD), 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) { 0, }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) MODULE_DEVICE_TABLE(pci, amb_pci_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) static struct pci_driver amb_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) .name = "amb",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) .probe = amb_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) .remove = amb_remove_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) .id_table = amb_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) static int __init amb_module_init (void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) PRINTD (DBG_FLOW|DBG_INIT, "init_module");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) BUILD_BUG_ON(sizeof(amb_mem) != 4*16 + 4*12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) show_version();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) amb_check_args();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) // get the juice
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) return pci_register_driver(&amb_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) /********** module exit **********/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) static void __exit amb_module_exit (void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) PRINTD (DBG_FLOW|DBG_INIT, "cleanup_module");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) pci_unregister_driver(&amb_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) module_init(amb_module_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) module_exit(amb_module_exit);