/*
 *  linux/drivers/message/fusion/mptlan.c
 *  IP Over Fibre Channel device driver.
 *  For use with LSI Fibre Channel PCI chip/adapters
 *  running LSI Fusion MPT (Message Passing Technology) firmware.
 *
 *  Copyright (c) 2000-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; version 2 of the License.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    NO WARRANTY
    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
    solely responsible for determining the appropriateness of using and
    distributing the Program and assumes all risks associated with its
    exercise of rights under this Agreement, including but not limited to
    the risks and costs of program errors, damage to or loss of data,
    programs or equipment, and unavailability or interruption of operations.

    DISCLAIMER OF LIABILITY
    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * Define statements used for debugging
 */
//#define MPT_LAN_IO_DEBUG

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

#include "mptlan.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define my_VERSION	MPT_LINUX_VERSION_COMMON
#define MYNAM		"mptlan"

MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * MPT LAN message sizes without variable part.
 */
#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
	(sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))

#define MPT_LAN_TRANSACTION32_SIZE \
	(sizeof(SGETransaction32_t) - sizeof(u32))

/*
 * Fusion MPT LAN private structures
 */

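/*
 * One BufferControl tracks a single skb that has been handed to the IOC:
 * the skb itself, its streaming DMA mapping, and the mapped length (kept
 * so the buffer can be unmapped later). The same structure is used both
 * for posted receive buckets (RcvCtl) and for in-flight sends (SendCtl).
 */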
struct BufferControl {
	struct sk_buff	*skb;
	dma_addr_t	dma;
	unsigned int	len;
};

struct mpt_lan_priv {
	MPT_ADAPTER *mpt_dev;
	u8 pnum; /* Port number in the IOC. This is not a Unix network port! */

	atomic_t buckets_out;		/* number of unused buckets on IOC */
	int bucketthresh;		/* Send more when this many left */

	int *mpt_txfidx; /* Free Tx Context list */
	int mpt_txfidx_tail;
	spinlock_t txfidx_lock;

	int *mpt_rxfidx; /* Free Rx Context list */
	int mpt_rxfidx_tail;
	spinlock_t rxfidx_lock;

	struct BufferControl *RcvCtl;	/* Receive BufferControl structs */
	struct BufferControl *SendCtl;	/* Send BufferControl structs */

	int max_buckets_out;		/* Max buckets to send to IOC */
	int tx_max_out;			/* IOC's Tx queue len */

	u32 total_posted;
	u32 total_received;

	struct delayed_work post_buckets_task;
	struct net_device *dev;
	unsigned long post_buckets_active;
};

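/*
 * Pseudo MAC header used for IP over Fibre Channel: a 16-bit type/NAA word
 * followed by a 6-byte address, for the destination and then the source.
 * (Layout inferred from how mpt_lan_sdu_send() packs the NAA and address
 * bytes into the send request's TransactionDetails.)
 */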
struct mpt_lan_ohdr {
	u16	dtype;
	u8	daddr[FC_ALEN];
	u16	stype;
	u8	saddr[FC_ALEN];
};

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

/*
 * Forward protos...
 */
static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
		       MPT_FRAME_HDR *reply);
static int  mpt_lan_open(struct net_device *dev);
static int  mpt_lan_reset(struct net_device *dev);
static int  mpt_lan_close(struct net_device *dev);
static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
					   int priority);
static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
static int  mpt_lan_receive_post_reply(struct net_device *dev,
				       LANReceivePostReply_t *pRecvRep);
static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
static int  mpt_lan_send_reply(struct net_device *dev,
			       LANSendReply_t *pSendRep);
static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
					 struct net_device *dev);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * Fusion MPT LAN private data
 */
static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;

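/*
 * Default queue depths: at most 127 receive buckets are kept posted to the
 * IOC, and the transmit window is kept 16 below that (111 outstanding
 * sends), presumably to leave some request-frame headroom on the adapter.
 */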
static u32 max_buckets_out = 127;
static u32 tx_max_out_p = 127 - 16;

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	lan_reply - Handle all data sent from the hardware.
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@mf: Pointer to original MPT request frame (NULL if TurboReply)
 *	@reply: Pointer to MPT reply frame
 *
 *	Returns 1 indicating original alloc'd request frame ptr
 *	should be freed, or 0 if it shouldn't.
 */
static int
lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
{
	struct net_device *dev = ioc->netdev;
	int FreeReqFrame = 0;

	dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
		  IOC_AND_NETDEV_NAMES_s_s(dev)));

//	dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
//			mf, reply));

	if (mf == NULL) {
		u32 tmsg = CAST_PTR_TO_U32(reply);

		dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				tmsg));

		switch (GET_LAN_FORM(tmsg)) {

		// NOTE! (Optimization) First case here is now caught in
		//  mptbase.c::mpt_interrupt() routine and callback here
		//  is now skipped for this case!
#if 0
		case LAN_REPLY_FORM_MESSAGE_CONTEXT:
//			dioprintk((KERN_INFO MYNAM "/lan_reply: "
//				  "MessageContext turbo reply received\n"));
			FreeReqFrame = 1;
			break;
#endif

		case LAN_REPLY_FORM_SEND_SINGLE:
//			dioprintk((MYNAM "/lan_reply: "
//				  "calling mpt_lan_send_reply (turbo)\n"));

			// Potential BUG here?
			//	FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
			//  If/when mpt_lan_send_turbo would return 1 here,
			//  calling routine (mptbase.c|mpt_interrupt)
			//  would Oops because mf has already been set
			//  to NULL. So after return from this func,
			//  mpt_interrupt() will attempt to put (NULL) mf ptr
			//  item back onto its adapter FreeQ - Oops!:-(
			//  It's Ok, since mpt_lan_send_turbo() *currently*
			//  always returns 0, but..., just in case:

			(void) mpt_lan_send_turbo(dev, tmsg);
			FreeReqFrame = 0;

			break;

		case LAN_REPLY_FORM_RECEIVE_SINGLE:
//			dioprintk((KERN_INFO MYNAM "@lan_reply: "
//				  "rcv-Turbo = %08x\n", tmsg));
			mpt_lan_receive_post_turbo(dev, tmsg);
			break;

		default:
			printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
				"that I don't know what to do with\n");

			/* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */

			break;
		}

		return FreeReqFrame;
	}

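	/*
	 * Past this point we have a full (non-turbo) reply frame: the
	 * context(s) live inside the MPI reply message itself, so dispatch
	 * on its Function code.
	 */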
//	msg = (u32 *) reply;
//	dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
//		  le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
//		  le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
//	dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
//		  reply->u.hdr.Function));

	switch (reply->u.hdr.Function) {

	case MPI_FUNCTION_LAN_SEND:
	{
		LANSendReply_t *pSendRep;

		pSendRep = (LANSendReply_t *) reply;
		FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
		break;
	}

	case MPI_FUNCTION_LAN_RECEIVE:
	{
		LANReceivePostReply_t *pRecvRep;

		pRecvRep = (LANReceivePostReply_t *) reply;
		if (pRecvRep->NumberOfContexts) {
			mpt_lan_receive_post_reply(dev, pRecvRep);
			if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
				FreeReqFrame = 1;
		} else
			dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
				  "ReceivePostReply received.\n"));
		break;
	}

	case MPI_FUNCTION_LAN_RESET:
		/* Just a default reply. Might want to check it to
		 * make sure that everything went ok.
		 */
		FreeReqFrame = 1;
		break;

	case MPI_FUNCTION_EVENT_NOTIFICATION:
	case MPI_FUNCTION_EVENT_ACK:
		/* _EVENT_NOTIFICATION should NOT come down this path any more.
		 * Should be routed to mpt_lan_event_process(), but just in case...
		 */
		FreeReqFrame = 1;
		break;

	default:
		printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
			"reply that I don't know what to do with\n");

		/* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */
		FreeReqFrame = 1;

		break;
	}

	return FreeReqFrame;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
	struct net_device *dev = ioc->netdev;
	struct mpt_lan_priv *priv;

	if (dev == NULL)
		return(1);
	else
		priv = netdev_priv(dev);

	dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
			reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
			reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));

	if (priv->mpt_rxfidx == NULL)
		return (1);

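	/*
	 * Three phases arrive here: SETUP_RESET (nothing to do), PRE_RESET
	 * (quiesce: stop the tx queue and forget all outstanding buckets,
	 * since the IOC drops them across the reset), and post-reset
	 * (repost receive buckets and restart the tx queue).
	 */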
	if (reset_phase == MPT_IOC_SETUP_RESET) {
		;
	} else if (reset_phase == MPT_IOC_PRE_RESET) {
		int i;
		unsigned long flags;

		netif_stop_queue(dev);

		dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));

		atomic_set(&priv->buckets_out, 0);

		/* Reset Rx Free Tail index and re-populate the queue. */
		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx_tail = -1;
		for (i = 0; i < priv->max_buckets_out; i++)
			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
	} else {
		mpt_lan_post_receive_buckets(priv);
		netif_wake_queue(dev);
	}

	return 1;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
	dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));

	switch (le32_to_cpu(pEvReply->Event)) {
	case MPI_EVENT_NONE:			/* 00 */
	case MPI_EVENT_LOG_DATA:		/* 01 */
	case MPI_EVENT_STATE_CHANGE:		/* 02 */
	case MPI_EVENT_UNIT_ATTENTION:		/* 03 */
	case MPI_EVENT_IOC_BUS_RESET:		/* 04 */
	case MPI_EVENT_EXT_BUS_RESET:		/* 05 */
	case MPI_EVENT_RESCAN:			/* 06 */
		/* Ok, do we need to do anything here? As far as
		   I can tell, this is when a new device gets added
		   to the loop. */
	case MPI_EVENT_LINK_STATUS_CHANGE:	/* 07 */
	case MPI_EVENT_LOOP_STATE_CHANGE:	/* 08 */
	case MPI_EVENT_LOGOUT:			/* 09 */
	case MPI_EVENT_EVENT_CHANGE:		/* 0A */
	default:
		break;
	}

	/*
	 * NOTE: pEvent->AckRequired handling now done in mptbase.c;
	 * Do NOT do it here now!
	 */

	return 1;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_open(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	int i;

	if (mpt_lan_reset(dev) != 0) {
		MPT_ADAPTER *mpt_dev = priv->mpt_dev;

		printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");

		if (mpt_dev->active)
			printk ("The ioc is active. Perhaps it needs to be"
				" reset?\n");
		else
			printk ("The ioc is inactive, most likely in the "
				"process of being reset. Please try again in "
				"a moment.\n");
	}

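	/*
	 * mpt_txfidx/mpt_rxfidx are simple stacks of free context indices:
	 * *_tail is the index of the topmost free entry (-1 when empty);
	 * contexts are pushed with arr[++tail] = ctx and popped with
	 * arr[tail--].
	 */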
	priv->mpt_txfidx = kmalloc_array(priv->tx_max_out, sizeof(int),
					 GFP_KERNEL);
	if (priv->mpt_txfidx == NULL)
		goto out;
	priv->mpt_txfidx_tail = -1;

	priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
				GFP_KERNEL);
	if (priv->SendCtl == NULL)
		goto out_mpt_txfidx;
	for (i = 0; i < priv->tx_max_out; i++)
		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;

	dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));

	priv->mpt_rxfidx = kmalloc_array(priv->max_buckets_out, sizeof(int),
					 GFP_KERNEL);
	if (priv->mpt_rxfidx == NULL)
		goto out_SendCtl;
	priv->mpt_rxfidx_tail = -1;

	priv->RcvCtl = kcalloc(priv->max_buckets_out,
			       sizeof(struct BufferControl),
			       GFP_KERNEL);
	if (priv->RcvCtl == NULL)
		goto out_mpt_rxfidx;
	for (i = 0; i < priv->max_buckets_out; i++)
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;

/**/	dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
/**/	for (i = 0; i < priv->tx_max_out; i++)
/**/		dlprintk((" %xh", priv->mpt_txfidx[i]));
/**/	dlprintk(("\n"));

	dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));

	mpt_lan_post_receive_buckets(priv);
	printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
		printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
			" Notifications. This is a bad thing! We're not going "
			"to go ahead, but I'd be leery of system stability at "
			"this point.\n");
	}

	netif_start_queue(dev);
	dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));

	return 0;
out_mpt_rxfidx:
	kfree(priv->mpt_rxfidx);
	priv->mpt_rxfidx = NULL;
out_SendCtl:
	kfree(priv->SendCtl);
	priv->SendCtl = NULL;
out_mpt_txfidx:
	kfree(priv->mpt_txfidx);
	priv->mpt_txfidx = NULL;
out:	return -ENOMEM;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Send a LanReset message to the FW. This should result in the FW returning
   any buckets it still has. */
static int
mpt_lan_reset(struct net_device *dev)
{
	MPT_FRAME_HDR *mf;
	LANResetRequest_t *pResetReq;
	struct mpt_lan_priv *priv = netdev_priv(dev);

	mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);

	if (mf == NULL) {
/*		dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
		"Unable to allocate a request frame.\n"));
*/
		return -1;
	}

	pResetReq = (LANResetRequest_t *) mf;

	pResetReq->Function = MPI_FUNCTION_LAN_RESET;
	pResetReq->ChainOffset = 0;
	pResetReq->Reserved = 0;
	pResetReq->PortNumber = priv->pnum;
	pResetReq->MsgFlags = 0;
	pResetReq->Reserved2 = 0;

	mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_close(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long timeout;
	int i;

	dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));

	mpt_event_deregister(LanCtx);

	dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
		  "since driver was loaded, %d still out\n",
		  priv->total_posted, atomic_read(&priv->buckets_out)));

	netif_stop_queue(dev);

	mpt_lan_reset(dev);

	timeout = jiffies + 2 * HZ;
	while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
		schedule_timeout_interruptible(1);

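	/*
	 * The LAN reset above asks the FW to hand back every bucket it still
	 * holds. Give it up to two seconds to do so, then unmap and free
	 * whatever is still outstanding ourselves.
	 */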
	for (i = 0; i < priv->max_buckets_out; i++) {
		if (priv->RcvCtl[i].skb != NULL) {
/**/			dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/				  "is still out\n", i));
			pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
					 priv->RcvCtl[i].len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(priv->RcvCtl[i].skb);
		}
	}

	kfree(priv->RcvCtl);
	kfree(priv->mpt_rxfidx);

	for (i = 0; i < priv->tx_max_out; i++) {
		if (priv->SendCtl[i].skb != NULL) {
			pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
					 priv->SendCtl[i].len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(priv->SendCtl[i].skb);
		}
	}

	kfree(priv->SendCtl);
	kfree(priv->mpt_txfidx);

	atomic_set(&priv->buckets_out, 0);

	printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Tx timeout handler. */
static void
mpt_lan_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;

	if (mpt_dev->active) {
		dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
		netif_wake_queue(dev);
	}
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
//static inline int
static int
mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	u32 ctx;

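	/*
	 * A SEND_SINGLE turbo reply carries the buffer context of exactly
	 * one completed LANSend in the turbo word, so the skb and its DMA
	 * mapping can be looked up directly in SendCtl[] and the context
	 * returned to the free stack.
	 */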
	ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
	sent = priv->SendCtl[ctx].skb;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += sent->len;

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__func__, sent));

	priv->SendCtl[ctx].skb = NULL;
	pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
			 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(sent);

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

	netif_wake_queue(dev);
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	int FreeReqFrame = 0;
	u32 *pContext;
	u32 ctx;
	u8 count;

	count = pSendRep->NumberOfContexts;

	dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pSendRep->IOCStatus)));

	/* Add check for Loginfo Flag in IOCStatus */

	switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		dev->stats.tx_packets += count;
		break;

	case MPI_IOCSTATUS_LAN_CANCELED:
	case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
		break;

	case MPI_IOCSTATUS_INVALID_SGL:
		dev->stats.tx_errors += count;
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		goto out;

	default:
		dev->stats.tx_errors += count;
		break;
	}

	pContext = &pSendRep->BufferContext;

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	while (count > 0) {
		ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));

		sent = priv->SendCtl[ctx].skb;
		dev->stats.tx_bytes += sent->len;

		dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				__func__, sent));

		priv->SendCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
				 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(sent);

		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;

		pContext++;
		count--;
	}
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

out:
	if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
		FreeReqFrame = 1;

	netif_wake_queue(dev);
	return FreeReqFrame;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static netdev_tx_t
mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANSendRequest_t *pSendReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	const unsigned char *mac;
	dma_addr_t dma;
	unsigned long flags;
	int ctx;
	u16 cur_naa = 0x1000;

	dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
			__func__, skb));

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	if (priv->mpt_txfidx_tail < 0) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: no tx context available: %d\n",
			__func__, priv->mpt_txfidx_tail);
		return NETDEV_TX_BUSY;
	}

	mf = mpt_get_msg_frame(LanCtx, mpt_dev);
	if (mf == NULL) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: Unable to alloc request frame\n",
			__func__);
		return NETDEV_TX_BUSY;
	}

	ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev)));

	pSendReq = (LANSendRequest_t *) mf;

	/* Set the mac.raw pointer, since this apparently isn't getting
	 * done before we get the skb. Pull the data pointer past the mac data.
	 */
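	/* The 12 bytes pulled below are the 6-byte destination and source
	 * addresses of the pseudo MAC header; the destination is re-packed
	 * into the TransactionDetails further down, so only the payload that
	 * follows the header gets DMA-mapped.
	 */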
	skb_reset_mac_header(skb);
	skb_pull(skb, 12);

	dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
			     PCI_DMA_TODEVICE);

	priv->SendCtl[ctx].skb = skb;
	priv->SendCtl[ctx].dma = dma;
	priv->SendCtl[ctx].len = skb->len;

	/* Message Header */
	pSendReq->Reserved = 0;
	pSendReq->Function = MPI_FUNCTION_LAN_SEND;
	pSendReq->ChainOffset = 0;
	pSendReq->Reserved2 = 0;
	pSendReq->MsgFlags = 0;
	pSendReq->PortNumber = priv->pnum;

	/* Transaction Context Element */
	pTrans = (SGETransaction32_t *) pSendReq->SG_List;

	/* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
	pTrans->ContextSize = sizeof(u32);
	pTrans->DetailsLength = 2 * sizeof(u32);
	pTrans->Flags = 0;
	pTrans->TransactionContext[0] = cpu_to_le32(ctx);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev),
//			ctx, skb, skb->data));

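	/* The 8 bytes of TransactionDetails carry the NAA value (0x1000)
	 * followed by the 6-byte destination address taken from the pseudo
	 * MAC header that was pulled off the skb above.
	 */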
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) mac = skb_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) (mac[0] << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) (mac[1] << 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) (mac[3] << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) (mac[4] << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) (mac[5] << 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) /* If we ever decide to send more than one Simple SGE per LANSend, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) we will need to make sure that LAST_ELEMENT only gets set on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) last one. Otherwise, bad voodoo and evil funkiness will commence. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) pSimple->FlagsLength = cpu_to_le32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) ((MPI_SGE_FLAGS_LAST_ELEMENT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) MPI_SGE_FLAGS_END_OF_BUFFER |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) MPI_SGE_FLAGS_SIMPLE_ELEMENT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) MPI_SGE_FLAGS_SYSTEM_ADDRESS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) MPI_SGE_FLAGS_HOST_TO_IOC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) MPI_SGE_FLAGS_64_BIT_ADDRESSING |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) pSimple->Address.Low = cpu_to_le32((u32) dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (sizeof(dma_addr_t) > sizeof(u32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) pSimple->Address.High = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) mpt_put_msg_frame (LanCtx, mpt_dev, mf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) netif_trans_update(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) IOC_AND_NETDEV_NAMES_s_s(dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) le32_to_cpu(pSimple->FlagsLength)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
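/*
 * Schedule the delayed work that reposts receive buckets to the IOC.
 * The post_buckets_active bit ensures only one instance is queued at a
 * time; priority selects an immediate run (delay 0) or a one-jiffy delay.
 */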
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) struct mpt_lan_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) if (priority) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) schedule_delayed_work(&priv->post_buckets_task, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) schedule_delayed_work(&priv->post_buckets_task, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) "timer.\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) IOC_AND_NETDEV_NAMES_s_s(dev) ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
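/*
 * Hand a completed receive bucket up the stack: classify the frame,
 * update the rx statistics and, if the number of buckets still posted
 * to the IOC has dropped below the threshold, kick the repost task.
 */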
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) struct mpt_lan_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) skb->protocol = mpt_lan_type_trans(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) "delivered to upper level.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) dev->stats.rx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) dev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) skb->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) atomic_read(&priv->buckets_out)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) mpt_lan_wake_post_buckets_task(dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) "remaining, %d received back since sod\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) atomic_read(&priv->buckets_out), priv->total_received));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
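/*
 * Handle a "turbo" receive reply, where the bucket context and packet
 * length are packed into the single 32-bit reply word.  Packets below
 * MPT_LAN_RX_COPYBREAK are copied into a fresh skb so the DMA-mapped
 * bucket can be reused; larger packets surrender the bucket's skb and
 * the mapping is torn down.
 */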
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) struct mpt_lan_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) MPT_ADAPTER *mpt_dev = priv->mpt_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) struct sk_buff *skb, *old_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) u32 ctx, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) skb = priv->RcvCtl[ctx].skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) len = GET_LAN_PACKET_LENGTH(tmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (len < MPT_LAN_RX_COPYBREAK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) old_skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) 		skb = dev_alloc_skb(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) IOC_AND_NETDEV_NAMES_s_s(dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) __FILE__, __LINE__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) skb_put(skb, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) priv->RcvCtl[ctx].skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) spin_lock_irqsave(&priv->rxfidx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) atomic_dec(&priv->buckets_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) priv->total_received++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) return mpt_lan_receive_skb(dev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
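/*
 * Called when the IOC hands buckets back without data (e.g. a canceled
 * LAN receive): unmap and free every returned bucket and push its
 * context back onto the free-index stack.
 */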
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) mpt_lan_receive_post_free(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) LANReceivePostReply_t *pRecvRep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) struct mpt_lan_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) MPT_ADAPTER *mpt_dev = priv->mpt_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) u32 ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) count = pRecvRep->NumberOfContexts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) /**/ dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) "IOC returned %d buckets, freeing them...\n", count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) spin_lock_irqsave(&priv->rxfidx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) for (i = 0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) skb = priv->RcvCtl[ctx].skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) // dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) // IOC_AND_NETDEV_NAMES_s_s(dev)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) // dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) // priv, &(priv->buckets_out)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) // dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) priv->RcvCtl[ctx].skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) atomic_sub(count, &priv->buckets_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) // for (i = 0; i < priv->max_buckets_out; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) // if (priv->RcvCtl[i].skb != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) // dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) // "is still out\n", i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) /* dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /**/ dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) /**/ "remaining, %d received back since sod.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) /**/ atomic_read(&priv->buckets_out), priv->total_received));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
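/*
 * Full (non-turbo) ReceivePostReply handling.  Three cases: a packet
 * spread over several buckets is reassembled by copying each piece into
 * a new skb; a small single-bucket packet is copied (copybreak); a large
 * single-bucket packet is passed up zero-copy after unmapping its bucket.
 */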
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) mpt_lan_receive_post_reply(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) LANReceivePostReply_t *pRecvRep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) struct mpt_lan_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) MPT_ADAPTER *mpt_dev = priv->mpt_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) struct sk_buff *skb, *old_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) u32 len, ctx, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) int i, l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) le16_to_cpu(pRecvRep->IOCStatus)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) MPI_IOCSTATUS_LAN_CANCELED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) return mpt_lan_receive_post_free(dev, pRecvRep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) len = le32_to_cpu(pRecvRep->PacketLength);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) "ReceivePostReply w/ PacketLength zero!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) IOC_AND_NETDEV_NAMES_s_s(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) ctx = le32_to_cpu(pRecvRep->BucketContext[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) count = pRecvRep->NumberOfContexts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) skb = priv->RcvCtl[ctx].skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) offset = le32_to_cpu(pRecvRep->PacketOffset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) // if (offset != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) // printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) // "w/ PacketOffset %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) // IOC_AND_NETDEV_NAMES_s_s(dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) // offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) // }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) IOC_AND_NETDEV_NAMES_s_s(dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) offset, len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) if (count > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) int szrem = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) // dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) // "for single packet, concatenating...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) // IOC_AND_NETDEV_NAMES_s_s(dev)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		skb = dev_alloc_skb(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) IOC_AND_NETDEV_NAMES_s_s(dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) __FILE__, __LINE__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) spin_lock_irqsave(&priv->rxfidx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) for (i = 0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) old_skb = priv->RcvCtl[ctx].skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) l = priv->RcvCtl[ctx].len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (szrem < l)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) l = szrem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) // dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) // IOC_AND_NETDEV_NAMES_s_s(dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) // i, l));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) priv->RcvCtl[ctx].dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) priv->RcvCtl[ctx].len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) pci_dma_sync_single_for_device(mpt_dev->pcidev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) priv->RcvCtl[ctx].dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) priv->RcvCtl[ctx].len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) szrem -= l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) } else if (len < MPT_LAN_RX_COPYBREAK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) old_skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		skb = dev_alloc_skb(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) IOC_AND_NETDEV_NAMES_s_s(dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) __FILE__, __LINE__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) priv->RcvCtl[ctx].dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) priv->RcvCtl[ctx].len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) pci_dma_sync_single_for_device(mpt_dev->pcidev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) priv->RcvCtl[ctx].dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) priv->RcvCtl[ctx].len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) spin_lock_irqsave(&priv->rxfidx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) spin_lock_irqsave(&priv->rxfidx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) priv->RcvCtl[ctx].skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) priv->RcvCtl[ctx].dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		skb_put(skb, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) atomic_sub(count, &priv->buckets_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) priv->total_received += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) "MPT_LAN_MAX_BUCKETS_OUT = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) IOC_AND_NETDEV_NAMES_s_s(dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) priv->mpt_rxfidx_tail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) MPT_LAN_MAX_BUCKETS_OUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (remaining == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) "(priv->buckets_out = %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) IOC_AND_NETDEV_NAMES_s_s(dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) atomic_read(&priv->buckets_out));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) else if (remaining < 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) "(priv->buckets_out = %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) IOC_AND_NETDEV_NAMES_s_s(dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) remaining, atomic_read(&priv->buckets_out));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if ((remaining < priv->bucketthresh) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) ((atomic_read(&priv->buckets_out) - remaining) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		printk (KERN_WARNING MYNAM ": Mismatch between driver's "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) "buckets_out count and fw's BucketsRemaining "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) "count has crossed the threshold, issuing a "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) "LanReset to clear the fw's hashtable. You may "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) "want to check your /var/log/messages for \"CRC "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) "error\" event notifications.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) mpt_lan_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) mpt_lan_wake_post_buckets_task(dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) return mpt_lan_receive_skb(dev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) /* Simple SGEs only at the moment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
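/*
 * Refill the IOC's supply of receive buckets.  Each request frame packs
 * as many buckets as will fit (see 'max' below); for each one an skb of
 * the current bucket length is mapped for DMA (or an already-mapped skb
 * of the right size is reused) and described by a 32-bit transaction
 * element plus a 64-bit simple SGE.
 */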
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) struct net_device *dev = priv->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) MPT_ADAPTER *mpt_dev = priv->mpt_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) MPT_FRAME_HDR *mf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) LANReceivePostRequest_t *pRecvReq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) SGETransaction32_t *pTrans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) SGESimple64_t *pSimple;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) dma_addr_t dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) u32 curr, buckets, count, max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) u32 len = (dev->mtu + dev->hard_header_len + 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) curr = atomic_read(&priv->buckets_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) buckets = (priv->max_buckets_out - curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) IOC_AND_NETDEV_NAMES_s_s(dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) __func__, buckets, curr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
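	/* Buckets per request frame: payload left after the fixed request
	 * header, divided by the size of one transaction element plus one
	 * 64-bit simple SGE.
	 */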
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) while (buckets) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) mf = mpt_get_msg_frame(LanCtx, mpt_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) if (mf == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) printk (KERN_ERR "%s: Unable to alloc request frame\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) dioprintk((KERN_ERR "%s: %u buckets remaining\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) __func__, buckets));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) pRecvReq = (LANReceivePostRequest_t *) mf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) mpt_dev->RequestNB[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) count = buckets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (count > max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) count = max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) pRecvReq->Function = MPI_FUNCTION_LAN_RECEIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) pRecvReq->ChainOffset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) pRecvReq->MsgFlags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) pRecvReq->PortNumber = priv->pnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) pSimple = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) for (i = 0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) int ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
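			/* Pop a free receive context off the rxfidx stack;
			 * a negative tail means no contexts are left.
			 */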
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) spin_lock_irqsave(&priv->rxfidx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (priv->mpt_rxfidx_tail < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) printk (KERN_ERR "%s: Can't alloc context\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) spin_unlock_irqrestore(&priv->rxfidx_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) skb = priv->RcvCtl[ctx].skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (skb && (priv->RcvCtl[ctx].len != len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) pci_unmap_single(mpt_dev->pcidev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) priv->RcvCtl[ctx].dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) priv->RcvCtl[ctx].len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) dev_kfree_skb(priv->RcvCtl[ctx].skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) skb = priv->RcvCtl[ctx].skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (skb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) skb = dev_alloc_skb(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) if (skb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) printk (KERN_WARNING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) MYNAM "/%s: Can't alloc skb\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) dma = pci_map_single(mpt_dev->pcidev, skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) len, PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) priv->RcvCtl[ctx].skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) priv->RcvCtl[ctx].dma = dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) priv->RcvCtl[ctx].len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) pTrans->ContextSize = sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) pTrans->DetailsLength = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) pTrans->Flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) pTrans->TransactionContext[0] = cpu_to_le32(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) pSimple = (SGESimple64_t *) pTrans->TransactionDetails;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
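			/* END_OF_LIST is deliberately left out here; it is
			 * OR'd onto the final simple SGE once the loop ends.
			 */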
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) pSimple->FlagsLength = cpu_to_le32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) ((MPI_SGE_FLAGS_END_OF_BUFFER |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) MPI_SGE_FLAGS_SIMPLE_ELEMENT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (sizeof(dma_addr_t) > sizeof(u32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) pSimple->Address.High = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) pTrans = (SGETransaction32_t *) (pSimple + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (pSimple == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) /**/ printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) /**/ __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) mpt_free_msg_frame(mpt_dev, mf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) pRecvReq->BucketCount = cpu_to_le32(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) /* printk(KERN_INFO MYNAM ": posting buckets\n ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) * for (i = 0; i < j + 2; i ++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) * printk (" %08x", le32_to_cpu(msg[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) * printk ("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) mpt_put_msg_frame(LanCtx, mpt_dev, mf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) priv->total_posted += i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) buckets -= i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) atomic_add(i, &priv->buckets_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) __func__, buckets, atomic_read(&priv->buckets_out)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) __func__, priv->total_posted, priv->total_received));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) clear_bit(0, &priv->post_buckets_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) mpt_lan_post_receive_buckets_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) post_buckets_task.work));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) static const struct net_device_ops mpt_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) .ndo_open = mpt_lan_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) .ndo_stop = mpt_lan_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) .ndo_start_xmit = mpt_lan_sdu_send,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) .ndo_tx_timeout = mpt_lan_tx_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
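/*
 * Allocate and register one Fibre Channel net_device for an adapter
 * port: set up the bucket-posting work, size the bucket and Tx limits,
 * and derive the interface address from the prefetched LANPage1
 * HardwareAddressLow field (the bytes are copied in reverse order below).
 */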
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) static struct net_device *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) struct mpt_lan_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) u8 HWaddr[FC_ALEN], *a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) dev->mtu = MPT_LAN_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) priv->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) priv->mpt_dev = mpt_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) priv->pnum = pnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) INIT_DELAYED_WORK(&priv->post_buckets_task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) mpt_lan_post_receive_buckets_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) priv->post_buckets_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) __LINE__, dev->mtu + dev->hard_header_len + 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) atomic_set(&priv->buckets_out, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) priv->total_posted = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) priv->total_received = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) priv->max_buckets_out = max_buckets_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) __LINE__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) mpt_dev->pfacts[0].MaxLanBuckets,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) max_buckets_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) priv->max_buckets_out));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) priv->bucketthresh = priv->max_buckets_out * 2 / 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) spin_lock_init(&priv->txfidx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) spin_lock_init(&priv->rxfidx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) /* Grab pre-fetched LANPage1 stuff. :-) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) HWaddr[0] = a[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) HWaddr[1] = a[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) HWaddr[2] = a[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) HWaddr[3] = a[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) HWaddr[4] = a[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) HWaddr[5] = a[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) dev->addr_len = FC_ALEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) memcpy(dev->dev_addr, HWaddr, FC_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) memset(dev->broadcast, 0xff, FC_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) /* The Tx queue is 127 deep on the 909.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * Give ourselves some breathing room.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) tx_max_out_p : MPT_TX_MAX_OUT_LIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) dev->netdev_ops = &mpt_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) /* MTU range: 96 - 65280 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) dev->min_mtu = MPT_LAN_MIN_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) dev->max_mtu = MPT_LAN_MAX_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) dlprintk((KERN_INFO MYNAM ": Finished registering dev "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) "and setting initial values\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) if (register_netdev(dev) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) return dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
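/*
 * PCI-layer probe callback: walk the adapter's port facts, skip ports
 * without the LAN protocol flag, and register the first LAN-capable
 * port as a net device.  Returns -ENODEV if no port qualifies.
 */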
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) "ProtocolFlags=%02Xh (%c%c%c%c)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) ioc->name, ioc->pfacts[i].PortNumber,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) ioc->pfacts[i].ProtocolFlags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) MPT_PROTOCOL_FLAGS_c_c_c_c(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) ioc->pfacts[i].ProtocolFlags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) if (!(ioc->pfacts[i].ProtocolFlags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) MPI_PORTFACTS_PROTOCOL_LAN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) "seems to be disabled on this adapter port!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) ioc->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) dev = mpt_register_lan_device(ioc, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) printk(KERN_ERR MYNAM ": %s: Unable to register "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) "port%d as a LAN device\n", ioc->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) ioc->pfacts[i].PortNumber);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) "registered as '%s'\n", ioc->name, dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) printk(KERN_INFO MYNAM ": %s/%s: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) "LanAddr = %pM\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) IOC_AND_NETDEV_NAMES_s_s(dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) ioc->netdev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) mptlan_remove(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) struct net_device *dev = ioc->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	if (dev != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) unregister_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) static struct mpt_pci_driver mptlan_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) .probe = mptlan_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) .remove = mptlan_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
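/*
 * Module init: register the LAN reply handler with the MPT base driver,
 * hook IOC reset notifications, then register the probe/remove callbacks
 * with the MPT PCI layer.
 */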
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) static int __init mpt_lan_init (void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) show_mptmod_ver(LANAME, LANVER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) "lan_reply");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (LanCtx <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) "handler with mptbase! The world is at an end! "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) "Everything is fading to black! Goodbye.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) static void __exit mpt_lan_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) mpt_device_driver_deregister(MPTLAN_DRIVER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) mpt_reset_deregister(LanCtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) if (LanCtx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) mpt_deregister(LanCtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) module_init(mpt_lan_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) module_exit(mpt_lan_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
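/*
 * Counterpart of eth_type_trans() for the FC LAN encapsulation: set the
 * mac header, classify pkt_type from the destination address, work
 * around a firmware byte-swap bug on broadcast frames (dtype of 0xffff),
 * and strip the LLC/SNAP header from IP/ARP frames so the right protocol
 * is returned.
 */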
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) static unsigned short
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) struct fcllc *fcllc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) skb_reset_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) skb_pull(skb, sizeof(struct mpt_lan_ohdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) if (fch->dtype == htons(0xffff)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) u32 *p = (u32 *) fch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) swab32s(p + 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) swab32s(p + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) swab32s(p + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) swab32s(p + 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) NETDEV_PTR_TO_IOC_NAME_s(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %pM\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) fch->saddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (*fch->daddr & 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) skb->pkt_type = PACKET_BROADCAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) skb->pkt_type = PACKET_MULTICAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) skb->pkt_type = PACKET_OTHERHOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) skb->pkt_type = PACKET_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) fcllc = (struct fcllc *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	/* Strip the LLC/SNAP header from IP and ARP packets since we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	 * don't pass them through to the 802.2/SNAP layers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) if (fcllc->dsap == EXTENDED_SAP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) (fcllc->ethertype == htons(ETH_P_IP) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) fcllc->ethertype == htons(ETH_P_ARP))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) skb_pull(skb, sizeof(struct fcllc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) return fcllc->ethertype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) return htons(ETH_P_802_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/