Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* cnic.c: QLogic CNIC core network driver.
 *
 * Copyright (c) 2006-2014 Broadcom Corporation
 * Copyright (c) 2014-2015 QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Previously modified and maintained by: Michael Chan <mchan@broadcom.com>
 * Maintained By: Dept-HSGLinuxNICDev@qlogic.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#define BCM_CNIC	1
#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
#include "cnic.h"
#include "cnic_defs.h"

#define CNIC_MODULE_NAME	"cnic"

static char version[] =
	"QLogic " CNIC_MODULE_NAME " Driver v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("QLogic cnic Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));
}

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

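/* UIO "open" handler.  Only a CAP_NET_ADMIN process may open the device,
 * and only one opener is allowed at a time.  The rings are torn down and
 * re-initialized under rtnl_lock so the open cannot race with netdev
 * events.
 */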
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

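/* Find the cnic device bound to @netdev.  On success a reference is taken
 * with cnic_hold(); the caller must drop it with cnic_put().
 */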
static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

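/* The helpers below proxy context-memory, register and ring operations to
 * the underlying bnx2/bnx2x driver through its drv_ctl() callback, with
 * the request encoded in a struct drv_ctl_info.
 */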
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg, int state)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct fcoe_capabilities *fcoe_cap =
		&info.data.register_data.fcoe_features;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	if (reg) {
		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
			memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
	} else {
		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
	}

	info.data.ulp_type = ulp_type;
	info.drv_state = state;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	if (!cp->ctx_tbl)
		return -EINVAL;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}

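/* Send an iSCSI netlink event to the userspace daemon through the
 * registered iSCSI ULP.  With a valid @csk a PATH_REQ carrying the
 * destination address, VLAN and path MTU is sent; otherwise an IF_DOWN
 * event is sent.  (The @type argument is currently unused; the event type
 * is derived from @csk.)  A failed PATH_REQ is retried up to three times,
 * 100 ms apart.
 */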
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return rc;
}

static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);

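/* Handle a netlink message from the userspace iSCSI daemon.  For a
 * PATH_UPDATE response, the socket identified by the handle gets its VLAN,
 * next-hop MAC and source address filled in; a valid MAC lets the connect
 * proceed via cnic_cm_set_pg(), while an invalid one completes the connect
 * upcall and abandons the attempt.
 */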
static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			csk->vlan_id = path_resp->vlan_id;

			memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rc = 0;
	}
	}

	return rc;
}

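/* Mark the socket as scheduled for offload.  Returns 1 if the caller
 * should proceed, 0 if offload is already scheduled or the connect was
 * aborted in the meantime.
 */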
static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_atomic();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

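/* Prepare to abort a connection: stop any new connect activity, wait for a
 * pending offload to finish, then report whether the connection had been
 * fully offloaded.  Returns 1 if so, after moving the socket to the
 * RESET_COMP state.
 */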
static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_atomic();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

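/* Register a ULP (e.g. iSCSI or FCoE) driver with cnic.  The ops become
 * globally visible through cnic_ulp_tbl, and cnic_init() is invoked for
 * every cnic device already on cnic_dev_list.
 */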
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}

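/* Unregister a ULP driver.  Fails if any device is still bound to this ULP
 * type; otherwise the table entry is cleared and, after the RCU grace
 * period, we wait up to two seconds for outstanding references to drain.
 */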
int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		pr_warn("%s: Failed waiting for ref count to go to zero\n",
			__func__);
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

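/* Bind a ULP instance to one cnic device.  The ULP type must already be
 * registered via cnic_register_driver() (else -EAGAIN), and each type can
 * be bound to a device only once (else -EBUSY).  If the device is already
 * up, the ULP is started immediately.
 */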
static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	cnic_ulp_ctl(dev, ulp_type, true, DRV_ACTIVE);

	return 0;
}
EXPORT_SYMBOL(cnic_register_driver);

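/* Detach a ULP instance from a device.  iSCSI userspace is told the
 * interface is going down first; after the RCU grace period we wait up to
 * two seconds for any pending upcall before telling the ethdev driver the
 * new ULP state.
 */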
static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	mutex_lock(&cnic_lock);
	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_FCOE)
		dev->fcoe_cap = NULL;

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	if (test_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
		cnic_ulp_ctl(dev, ulp_type, false, DRV_UNLOADED);
	else
		cnic_ulp_ctl(dev, ulp_type, false, DRV_INACTIVE);

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

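/* Initialize an ID table: a bitmap allocator handing out IDs in the range
 * [start_id, start_id + size).  @next seeds the round-robin search
 * position.
 */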
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
			    u32 next)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = next;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

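/* Reserve a specific @id.  Returns 0 on success, -1 if the id is out of
 * range or already in use.
 */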
static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

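/* Free a multi-page DMA area and its optional page table, releasing every
 * coherent mapping allocated by cnic_alloc_dma().
 */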
static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}

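/* Both page-table setup variants write each page's 64-bit DMA address as
 * two 32-bit words; they differ only in word order (high word first below,
 * low word first in the _le variant), matching what the chip family
 * wired to the setup_pgtbl hook expects.
 */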
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}

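/* Allocate @pages coherent DMA pages, tracked by a single allocation that
 * holds the page pointers followed by their DMA addresses.  With
 * @use_pg_tbl set, a page table is also allocated and filled in by the
 * chip-specific setup_pgtbl hook.  On any failure everything allocated so
 * far is freed.
 */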
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    CNIC_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
			  ~(CNIC_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	if (udev->l2_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 				  udev->l2_buf, udev->l2_buf_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		udev->l2_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	if (udev->l2_ring) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 				  udev->l2_ring, udev->l2_ring_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		udev->l2_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) static void __cnic_free_uio(struct cnic_uio_dev *udev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	uio_unregister_device(&udev->cnic_uinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	__cnic_free_uio_rings(udev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	pci_dev_put(udev->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	kfree(udev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) static void cnic_free_uio(struct cnic_uio_dev *udev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	if (!udev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	write_lock(&cnic_dev_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	list_del_init(&udev->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	write_unlock(&cnic_dev_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	__cnic_free_uio(udev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) static void cnic_free_resc(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	struct cnic_uio_dev *udev = cp->udev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	if (udev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		udev->dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		cp->udev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		if (udev->uio_dev == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 			__cnic_free_uio_rings(udev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	cnic_free_context(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	kfree(cp->ctx_arr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	cp->ctx_arr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	cp->ctx_blks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	cnic_free_dma(dev, &cp->gbl_buf_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	cnic_free_dma(dev, &cp->kwq_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	cnic_free_dma(dev, &cp->kwq_16_data_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	cnic_free_dma(dev, &cp->kcq2.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	cnic_free_dma(dev, &cp->kcq1.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	kfree(cp->iscsi_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	cp->iscsi_tbl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	kfree(cp->ctx_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	cp->ctx_tbl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	cnic_free_id_tbl(&cp->cid_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
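/*
 * On the 5709, connection contexts are paged in host memory: 128-byte
 * contexts are grouped into page-sized blocks (cids_per_blk each) and
 * one coherent page is allocated per block.  The cid ranges to cover
 * are read back from the chip's PG and iSCSI context-map registers.
 */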
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) static int cnic_alloc_context(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		int i, k, arr_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		cp->ctx_blk_size = CNIC_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 			   sizeof(struct cnic_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		if (cp->ctx_arr == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		k = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		for (i = 0; i < 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 			u32 j, reg, off, lo, hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
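			/*
			 * Each context-map register packs a cid range:
			 * first cid in the upper 16 bits, one past the
			 * last cid in the lower 16.
			 */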
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 			if (i == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 				off = BNX2_PG_CTX_MAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 				off = BNX2_ISCSI_CTX_MAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 			reg = cnic_reg_rd_ind(dev, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 			lo = reg >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 			hi = reg & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 				cp->ctx_arr[k].cid = j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		cp->ctx_blks = k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 			cp->ctx_blks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		for (i = 0; i < cp->ctx_blks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 			cp->ctx_arr[i].ctx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 				dma_alloc_coherent(&dev->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 						   CNIC_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 						   &cp->ctx_arr[i].mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 						   GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 			if (cp->ctx_arr[i].ctx == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
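/*
 * KCQ index helpers.  A queue backed by a hardware page table is
 * indexed linearly (the bnx2-style pair).  A chained queue reserves
 * the last slot of every page for a bnx2x_bd_chain_next pointer (see
 * cnic_alloc_kcq()), so the bnx2x-style pair bumps the index past
 * that slot whenever it lands on it.
 */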
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) static u16 cnic_bnx2_next_idx(u16 idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	return idx + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) static u16 cnic_bnx2_hw_idx(u16 idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	return idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) static u16 cnic_bnx2x_next_idx(u16 idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	return idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) static u16 cnic_bnx2x_hw_idx(u16 idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	return idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
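/*
 * Allocate the kernel completion queue pages and pick the index
 * helpers that match the layout.  Without a page table, the pages
 * are linked into a ring: the bnx2x_bd_chain_next element at the end
 * of each page points at the next page, and the last page points
 * back at the first.
 */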
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 			  bool use_pg_tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	int err, i, use_page_tbl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	struct kcqe **kcq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	if (use_pg_tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		use_page_tbl = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	kcq = (struct kcqe **) info->dma.pg_arr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	info->kcq = kcq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	info->next_idx = cnic_bnx2_next_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	info->hw_idx = cnic_bnx2_hw_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	if (use_pg_tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	info->next_idx = cnic_bnx2x_next_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	info->hw_idx = cnic_bnx2x_hw_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	for (i = 0; i < KCQ_PAGE_CNT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		struct bnx2x_bd_chain_next *next =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		int j = i + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		if (j >= KCQ_PAGE_CNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 			j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	struct cnic_local *cp = udev->dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	if (udev->l2_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 					   &udev->l2_ring_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 					   GFP_KERNEL | __GFP_COMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	if (!udev->l2_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 					  &udev->l2_buf_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 					  GFP_KERNEL | __GFP_COMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	if (!udev->l2_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		__cnic_free_uio_rings(udev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
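/*
 * Find or create the UIO device for this PCI function.  An existing
 * udev on cnic_udev_list is reused so that a userspace process that
 * still holds the UIO device open survives the cnic device being
 * torn down and re-created; uio_dev == -1 means no userspace
 * instance is currently attached.
 */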
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	struct cnic_uio_dev *udev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	list_for_each_entry(udev, &cnic_udev_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		if (udev->pdev == dev->pcidev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 			udev->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 			if (__cnic_alloc_uio_rings(udev, pages)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 				udev->dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 			cp->udev = udev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	if (!udev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	udev->uio_dev = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	udev->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	udev->pdev = dev->pcidev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	if (__cnic_alloc_uio_rings(udev, pages))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		goto err_udev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	list_add(&udev->list, &cnic_udev_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	pci_dev_get(udev->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	cp->udev = udev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)  err_udev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	kfree(udev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
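/*
 * Export four regions to userspace (typically the iscsiuio daemon)
 * via UIO: mem[0] is BAR 0 (physical), mem[1] the status block,
 * mem[2] the L2 ring and mem[3] the L2 buffer (all kernel-logical
 * memory).  Registration happens only once per udev; when userspace
 * is still attached, only the rings are re-initialized.
 *
 * Per the UIO convention (offset N * page size selects mapping N), a
 * consumer would do roughly the following -- a sketch only, device
 * path and sizes hypothetical:
 *
 *	int fd = open("/dev/uio0", O_RDWR);
 *	void *bar0 = mmap(NULL, bar0_len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0 * getpagesize());
 *	void *sblk = mmap(NULL, sblk_len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 1 * getpagesize());
 */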
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) static int cnic_init_uio(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	struct cnic_uio_dev *udev = cp->udev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	struct uio_info *uinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	if (!udev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	uinfo = &udev->cnic_uinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	uinfo->mem[0].internal_addr = dev->regview;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	uinfo->mem[0].memtype = UIO_MEM_PHYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 						     TX_MAX_TSS_RINGS + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 					CNIC_PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		uinfo->name = "bnx2_cnic";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 			CNIC_PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		uinfo->name = "bnx2x_cnic";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	uinfo->mem[2].size = udev->l2_ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	uinfo->mem[3].size = udev->l2_buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	uinfo->version = CNIC_MODULE_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	uinfo->irq = UIO_IRQ_CUSTOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	uinfo->open = cnic_uio_open;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	uinfo->release = cnic_uio_close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	if (udev->uio_dev == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		if (!uinfo->priv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 			uinfo->priv = udev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 			ret = uio_register_device(&udev->pdev->dev, uinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		cnic_init_rings(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	ret = cnic_alloc_context(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	ret = cnic_alloc_uio_rings(dev, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	ret = cnic_init_uio(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	cnic_free_resc(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	int ctx_blk_size = cp->ethdev->ctx_blk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	int total_mem, blks, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	blks = total_mem / ctx_blk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	if (total_mem % ctx_blk_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		blks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	if (blks > cp->ethdev->ctx_tbl_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	if (cp->ctx_arr == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	cp->ctx_blks = blks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	cp->ctx_blk_size = ctx_blk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	if (!CHIP_IS_E1(bp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		cp->ctx_align = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		cp->ctx_align = ctx_blk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	for (i = 0; i < blks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		cp->ctx_arr[i].ctx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 					   &cp->ctx_arr[i].mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 					   GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		if (cp->ctx_arr[i].ctx == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
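		/*
		 * E1 needs the block naturally aligned.  If this one
		 * is not, free everything allocated so far, grow the
		 * block size by one alignment unit and restart (i is
		 * set to -1 so the for-increment makes it 0 again).
		 * Since cp->ctx_blk_size then no longer equals
		 * ctx_blk_size, the retry runs at most once.
		 */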
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 				cnic_free_context(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 				cp->ctx_blk_size += cp->ctx_align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 				i = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 
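/*
 * Lay out the cid space and allocate everything the bnx2x side
 * needs: the first MAX_ISCSI_TBL_SZ cids above starting_cid belong
 * to iSCSI and the remainder (E2+ chips only) to FCoE.  Every
 * context is also handed a slot in the kwqe-16 data area mapped
 * below.
 */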
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	struct cnic_eth_dev *ethdev = cp->ethdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	u32 start_cid = ethdev->starting_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	int i, j, n, ret, pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	cp->iscsi_start_cid = start_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		cp->max_cid_space += dev->max_fcoe_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		if (!cp->fcoe_init_cid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 			cp->fcoe_init_cid = 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	cp->iscsi_tbl = kcalloc(MAX_ISCSI_TBL_SZ, sizeof(struct cnic_iscsi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 				GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	if (!cp->iscsi_tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	cp->ctx_tbl = kcalloc(cp->max_cid_space, sizeof(struct cnic_context),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 			      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	if (!cp->ctx_tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		CNIC_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 
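	/*
	 * Carve each DMA page into n fixed-size kwqe-16 data slots
	 * and hand one slot to every context; j moves to the next
	 * page once the current page's slots are used up.
	 */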
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 		long off = CNIC_KWQ16_DATA_SIZE * (i % n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 						   off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		if ((i % n) == (n - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 			j++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	if (CNIC_SUPPORTS_FCOE(bp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	ret = cnic_alloc_bnx2x_context(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	cp->l2_rx_ring_size = 15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	ret = cnic_alloc_uio_rings(dev, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	ret = cnic_init_uio(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	cnic_free_resc(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 
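/*
 * Free slots on the KWQ.  max_kwq_idx is a power-of-two mask, so
 * (prod - con) & mask counts the in-flight entries even across u16
 * wrap-around.  With illustrative (hypothetical) numbers: mask 127,
 * prod = 3, con = 120 gives (3 - 120) & 127 = 11 in flight, leaving
 * 127 - 11 = 116 slots.
 */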
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) static inline u32 cnic_kwq_avail(struct cnic_local *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	return cp->max_kwq_idx -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
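/*
 * Copy the caller's work-queue entries onto the KWQ under
 * cnic_ulp_lock, then ring the doorbell by writing the new producer
 * index.  The availability check is waived while CNIC_LCL_FL_KWQ_INIT
 * is still set (i.e. on the first submission after init); the flag
 * is cleared below.
 */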
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 				  u32 num_wqes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	struct kwqe *prod_qe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	u16 prod, sw_prod, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		return -EAGAIN;		/* bnx2 is down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	spin_lock_bh(&cp->cnic_ulp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	if (num_wqes > cnic_kwq_avail(cp) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		spin_unlock_bh(&cp->cnic_ulp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	prod = cp->kwq_prod_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	sw_prod = prod & MAX_KWQ_IDX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	for (i = 0; i < num_wqes; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		prod++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		sw_prod = prod & MAX_KWQ_IDX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	cp->kwq_prod_idx = prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	spin_unlock_bh(&cp->cnic_ulp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 				   union l5cm_specific_data *l5_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	dma_addr_t map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	map = ctx->kwqe_data_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	l5_data->phy_address.lo = (u64) map & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	l5_data->phy_address.hi = (u64) map >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	return ctx->kwqe_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
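/*
 * Build and submit a single 16-byte slow-path element: the header
 * packs the command with the HW cid plus the connection type and PF
 * id, and the data half carries the caller's DMA address.  The bnx2x
 * driver returns the number of entries it consumed, so 1 means our
 * one element was accepted.
 */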
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 				u32 type, union l5cm_specific_data *l5_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	struct l5cm_spe kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	struct kwqe_16 *kwq[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	u16 type_16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	kwqe.hdr.conn_and_cmd_data =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 			     BNX2X_HW_CID(bp, cid)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		   SPE_HDR_FUNCTION_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	kwqe.hdr.type = cpu_to_le16(type_16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	kwqe.hdr.reserved1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	kwq[0] = (struct kwqe_16 *) &kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	spin_lock_bh(&cp->cnic_ulp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	spin_unlock_bh(&cp->cnic_ulp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	if (ret == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 
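/*
 * Hand completed KCQEs to the upper-layer protocol driver.  ulp_ops
 * is read under RCU so that delivery can race safely with the ULP
 * unregistering itself.
 */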
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 				   struct kcqe *cqes[], u32 num_cqes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	struct cnic_ulp_ops *ulp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	if (likely(ulp_ops)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 					  cqes, num_cqes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 				       int en_tcp_dack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	u16 tstorm_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	if (time_stamps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	if (en_tcp_dack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 
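/*
 * iSCSI INIT1 kwqe: record the per-connection sizing (task array,
 * R2T queue, HQ) and program it into the Tstorm, Ustorm, Xstorm and
 * Cstorm internal RAM of this PF.  hq_bds is the HQ size rounded up
 * to whole pages, expressed in buffer descriptors.
 */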
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	int hq_bds, pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	u32 pfid = bp->pfid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	cp->num_ccells = req1->num_ccells_per_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 			      cp->num_iscsi_tasks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 			BNX2X_ISCSI_R2TQE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	cp->num_cqs = req1->num_cqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	if (!dev->max_iscsi_conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	/* init Tstorm RAM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		  req1->rq_num_wqes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		  CNIC_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		  req1->num_tasks_per_conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	/* init Ustorm RAM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 		  req1->rq_buffer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		  CNIC_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		  req1->num_tasks_per_conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		  req1->rq_num_wqes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		  req1->cq_num_wqes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	/* init Xstorm RAM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		  CNIC_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 		  req1->num_tasks_per_conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		  hq_bds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 		  req1->num_tasks_per_conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	/* init Cstorm RAM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 		  CNIC_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 		  req1->num_tasks_per_conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 		  req1->cq_num_wqes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		  hq_bds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	cnic_bnx2x_set_tcp_options(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 			req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 			req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 
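/*
 * iSCSI INIT2 kwqe: program the 64-bit error bitmap (written as two
 * 32-bit words) and the CQ sequence-number sizes, then synthesize an
 * ISCSI_KCQE_OPCODE_INIT completion locally and deliver it to the
 * ULP through cnic_reply_bnx2x_kcqes().
 */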
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	u32 pfid = bp->pfid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	struct iscsi_kcqe kcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	struct kcqe *cqes[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	memset(&kcqe, 0, sizeof(kcqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	if (!dev->max_iscsi_conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 		kcqe.completion_status =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		req2->error_bit_map[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	CNIC_WR(dev, BAR_USTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	CNIC_WR(dev, BAR_USTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		req2->error_bit_map[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	cqes[0] = (struct kcqe *) &kcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		struct cnic_iscsi *iscsi = ctx->proto.iscsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		cnic_free_dma(dev, &iscsi->hq_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 		cnic_free_dma(dev, &iscsi->r2tq_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		cnic_free_dma(dev, &iscsi->task_array_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 		cnic_free_id(&cp->cid_tbl, ctx->cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	ctx->cid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	u32 cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	int ret, pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 		if (cid == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 		ctx->cid = cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	cid = cnic_alloc_new_id(&cp->cid_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	if (cid == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	ctx->cid = cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	cnic_free_bnx2x_conn_resc(dev, l5_cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 
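/* Each per-connection queue (task array, R2TQ, HQ) is sized in whole
 * CNIC pages: CNIC_PAGE_ALIGN() rounds the byte count up before the
 * divide. Illustrative userspace sketch, assuming 4 KiB pages (macro
 * and variable names hypothetical, not part of the driver):
 */
#if 0
#include <stdio.h>

#define PAGE_SZ		4096UL
#define PAGE_ALIGN_UP(x)	(((x) + PAGE_SZ - 1) & ~(PAGE_SZ - 1))

int main(void)
{
	unsigned long bytes = 9216;	/* sample task-array size */

	/* 9216 -> 12288 after alignment -> 3 pages */
	printf("%lu bytes -> %lu pages\n",
	       bytes, PAGE_ALIGN_UP(bytes) / PAGE_SZ);
	return 0;
}
#endif
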
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 				struct regpair *ctx_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	struct cnic_eth_dev *ethdev = cp->ethdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	unsigned long align_off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	dma_addr_t ctx_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	void *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	if (cp->ctx_align) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 		unsigned long mask = cp->ctx_align - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 		if (cp->ctx_arr[blk].mapping & mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 			align_off = cp->ctx_align -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 				    (cp->ctx_arr[blk].mapping & mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	ctx_map = cp->ctx_arr[blk].mapping + align_off +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 		(off * BNX2X_CONTEXT_MEM_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	ctx = cp->ctx_arr[blk].ctx + align_off +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	      (off * BNX2X_CONTEXT_MEM_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	if (init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	ctx_addr->lo = ctx_map & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	ctx_addr->hi = (u64) ctx_map >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	return ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 
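/* Context blocks live in page-sized chunks of cp->ctx_arr[]; when the
 * DMA mapping of a chunk is not aligned to cp->ctx_align, align_off
 * pads up to the next boundary before indexing by (cid - starting_cid).
 * The 64-bit address is handed back to the caller as a {lo, hi}
 * regpair. Illustrative sketch of the arithmetic (helper names
 * hypothetical, not part of the driver):
 */
#if 0
#include <stdint.h>

/* Pad 'map' up to the next 'align' boundary; align is a power of two. */
static uint64_t align_up(uint64_t map, uint64_t align)
{
	uint64_t mask = align - 1;

	return (map & mask) ? map + (align - (map & mask)) : map;
}

/* Split a 64-bit DMA address into regpair halves. */
static void to_regpair(uint64_t map, uint32_t *lo, uint32_t *hi)
{
	*lo = map & 0xffffffff;
	*hi = map >> 32;
}
#endif
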
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 				u32 num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	struct iscsi_kwqe_conn_offload1 *req1 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 			(struct iscsi_kwqe_conn_offload1 *) wqes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	struct iscsi_kwqe_conn_offload2 *req2 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 			(struct iscsi_kwqe_conn_offload2 *) wqes[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	struct iscsi_kwqe_conn_offload3 *req3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	u32 cid = ctx->cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	u32 hw_cid = BNX2X_HW_CID(bp, cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	struct iscsi_context *ictx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	struct regpair context_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	int i, j, n = 2, n_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	u8 port = BP_PORT(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	ctx->ctx_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	if (!req2->num_additional_wqes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	n_max = req2->num_additional_wqes + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	if (ictx == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	ictx->xstorm_ag_context.hq_prod = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	ictx->xstorm_st_context.iscsi.first_burst_length =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 		ISCSI_DEF_FIRST_BURST_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 		ISCSI_DEF_MAX_RECV_SEG_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 		req1->sq_page_table_addr_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 		req1->sq_page_table_addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 		iscsi->hq_info.pgtbl_map & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 		(u64) iscsi->hq_info.pgtbl_map >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 		iscsi->hq_info.pgtbl[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 		iscsi->hq_info.pgtbl[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 		iscsi->r2tq_info.pgtbl[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		iscsi->r2tq_info.pgtbl[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 		iscsi->task_array_info.pgtbl_map & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 		(u64) iscsi->task_array_info.pgtbl_map >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 		BNX2X_ISCSI_PBL_NOT_CACHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	ictx->xstorm_st_context.iscsi.flags.flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	ictx->xstorm_st_context.iscsi.flags.flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 		ETH_P_8021Q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	if (BNX2X_CHIP_IS_E2_PLUS(bp) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	    bp->common.chip_port_mode == CHIP_2_PORT_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 		port = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	ictx->xstorm_st_context.common.flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	ictx->xstorm_st_context.common.flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	/* TSTORM requires the RQ DB base address, not the PTE address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 		req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 		req2->rq_page_table_addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	ictx->tstorm_st_context.tcp.flags2 |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	ictx->tstorm_st_context.tcp.ooo_support_mode =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 		req2->rq_page_table_addr_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 		req2->rq_page_table_addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		iscsi->r2tq_info.pgtbl[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 		iscsi->r2tq_info.pgtbl[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 		req1->cq_page_table_addr_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		req1->cq_page_table_addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	ictx->ustorm_st_context.task_pbe_cache_index =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 		BNX2X_ISCSI_PBL_NOT_CACHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	ictx->ustorm_st_context.task_pdu_cache_index =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 		if (j == 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 			if (n >= n_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 			j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 			req3->qp_first_pte[j].hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 			req3->qp_first_pte[j].lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	ictx->ustorm_st_context.task_pbl_base.lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 		iscsi->task_array_info.pgtbl_map & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	ictx->ustorm_st_context.task_pbl_base.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 		(u64) iscsi->task_array_info.pgtbl_map >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	ictx->ustorm_st_context.tce_phy_addr.lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		iscsi->task_array_info.pgtbl[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	ictx->ustorm_st_context.tce_phy_addr.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 		iscsi->task_array_info.pgtbl[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	ictx->ustorm_st_context.negotiated_rx_and_flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 		ISCSI_DEF_MAX_BURST_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	ictx->ustorm_st_context.negotiated_rx |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	ictx->cstorm_st_context.hq_pbl_base.lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 		iscsi->hq_info.pgtbl_map & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	ictx->cstorm_st_context.hq_pbl_base.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 		(u64) iscsi->hq_info.pgtbl_map >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	ictx->cstorm_st_context.task_pbl_base.lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 		iscsi->task_array_info.pgtbl_map & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	ictx->cstorm_st_context.task_pbl_base.hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 		(u64) iscsi->task_array_info.pgtbl_map >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	/* CSTORM and USTORM initialization differ: CSTORM requires the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	 * CQ DB base address, not the PTE address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	ictx->cstorm_st_context.cq_db_base.lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 		req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	for (i = 0; i < cp->num_cqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 			ISCSI_INITIAL_SN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 			ISCSI_INITIAL_SN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	ictx->xstorm_ag_context.cdu_reserved =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 				       ISCSI_CONNECTION_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	ictx->ustorm_ag_context.cdu_usage =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 				       ISCSI_CONNECTION_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 
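/* Each iscsi_kwqe_conn_offload3 supplies up to three qp_first_pte[]
 * entries, so the CQ loop above advances to the next WQE whenever j
 * wraps at 3; cq[0] was already seeded from req2. Note also the
 * consistent hi/lo word swap when request PTEs are copied into the
 * curr_pbe fields (the same pattern is used for SQ, RQ and CQ). The CQ
 * processing enable mask is simply the low num_cqs bits. Sketch
 * (helper name hypothetical, not part of the driver):
 */
#if 0
#include <stdint.h>

static uint32_t cq_enable_mask(unsigned int num_cqs)
{
	/* num_cqs = 2 -> 0x3, num_cqs = 8 -> 0xff */
	return (1u << num_cqs) - 1;
}
#endif
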
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 				   u32 num, int *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	struct iscsi_kwqe_conn_offload1 *req1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	struct iscsi_kwqe_conn_offload2 *req2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	struct cnic_context *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	struct iscsi_kcqe kcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	struct kcqe *cqes[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	u32 l5_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	if (num < 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 		*work = num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	if ((num - 2) < req2->num_additional_wqes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 		*work = num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	*work = 2 + req2->num_additional_wqes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	l5_cid = req1->iscsi_conn_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	if (l5_cid >= MAX_ISCSI_TBL_SZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	memset(&kcqe, 0, sizeof(kcqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	kcqe.iscsi_conn_id = l5_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	ctx = &cp->ctx_tbl[l5_cid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 		kcqe.completion_status =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 		atomic_dec(&cp->iscsi_conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 		atomic_dec(&cp->iscsi_conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 		cnic_free_bnx2x_conn_resc(dev, l5_cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 		atomic_dec(&cp->iscsi_conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	cqes[0] = (struct kcqe *) &kcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 
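/* Contract of the OFFLOAD_CONN1 handler above: *work reports how many
 * KWQEs were consumed (2 fixed WQEs + req2->num_additional_wqes); on a
 * short or malformed batch it sets *work = num so the caller discards
 * the remainder. Once the batch is well-formed, failures are reported
 * to the ULP through the KCQE completion_status and the function still
 * returns 0.
 */
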
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	struct iscsi_kwqe_conn_update *req =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 		(struct iscsi_kwqe_conn_update *) kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	union l5cm_specific_data l5_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	memcpy(data, kwqe, sizeof(struct kwqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 
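/* The UPDATE_CONN handler maps the hardware context id back to the
 * driver's l5_cid, copies the whole KWQE into the buffer returned by
 * cnic_get_kwqe_16_data(), and submits the UPDATE_CONN ramrod with the
 * associated l5_data.
 */
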
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	union l5cm_specific_data l5_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	u32 hw_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	init_waitqueue_head(&ctx->waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	ctx->wait_cond = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	memset(&l5_data, 0, sizeof(l5_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	hw_cid = BNX2X_HW_CID(bp, ctx->cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 			return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 
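/* A CFC delete ramrod releases the connection's hardware context. The
 * helper above waits, with a timeout, for ctx->wait_cond to be set,
 * and reports -EBUSY if CTX_FL_CID_ERROR was flagged on the context.
 */
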
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	struct iscsi_kwqe_conn_destroy *req =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		(struct iscsi_kwqe_conn_destroy *) kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	u32 l5_cid = req->reserved0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	struct iscsi_kcqe kcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	struct kcqe *cqes[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 		goto skip_cfc_delete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 		if (delta > (2 * HZ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 			delta = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 		goto destroy_reply;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) skip_cfc_delete:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	cnic_free_bnx2x_conn_resc(dev, l5_cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 		atomic_dec(&cp->iscsi_conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) destroy_reply:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	memset(&kcqe, 0, sizeof(kcqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	kcqe.iscsi_conn_id = l5_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	kcqe.iscsi_conn_context_id = req->context_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	cqes[0] = (struct kcqe *) &kcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 
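/* DESTROY_CONN carries the l5_cid in reserved0. Deletes that arrive
 * within two seconds of ctx->timestamp are deferred: the handler
 * queues delete_task for the remaining time instead of issuing the CFC
 * delete inline. Illustrative userspace sketch of the jiffies
 * arithmetic (helper name hypothetical, not part of the driver):
 */
#if 0
static unsigned long delete_delay(unsigned long now, unsigned long stamp,
				  unsigned long grace)
{
	unsigned long delta = stamp + grace - now;

	/* an already-expired (wrapped) delta shows up as > grace */
	return (delta > grace) ? 0 : delta;
}
#endif
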
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 				      struct l4_kwq_connect_req1 *kwqe1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 				      struct l4_kwq_connect_req3 *kwqe3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 				      struct l5cm_active_conn_buffer *conn_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	struct l5cm_xstorm_conn_buffer *xstorm_buf =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 		&conn_buf->xstorm_conn_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	struct l5cm_tstorm_conn_buffer *tstorm_buf =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 		&conn_buf->tstorm_conn_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	struct regpair context_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	u32 cid = BNX2X_SW_CID(kwqe1->cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	struct in6_addr src_ip, dst_ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	u32 *addrp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 	addrp = (u32 *) &conn_addr->local_ip_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	for (i = 0; i < 4; i++, addrp++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	addrp = (u32 *) &conn_addr->remote_ip_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	for (i = 0; i < 4; i++, addrp++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	xstorm_buf->context_addr.hi = context_addr.hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	xstorm_buf->context_addr.lo = context_addr.lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	xstorm_buf->mss = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	xstorm_buf->pseudo_header_checksum =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	if (kwqe3->ka_timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 		tstorm_buf->ka_enable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 		tstorm_buf->ka_interval = kwqe3->ka_interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	tstorm_buf->max_rt_time = 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 
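/* The connection addresses arrive as host-order 32-bit words; the
 * helper above converts them to big-endian in6_addr words before
 * computing the TCP/IPv6 pseudo-header checksum with
 * csum_ipv6_magic(). Userspace stand-in for the conversion (helper
 * name hypothetical, not part of the driver):
 */
#if 0
#include <arpa/inet.h>	/* htonl(), the userspace cpu_to_be32() */
#include <stdint.h>

static void words_to_in6(const uint32_t addrp[4], uint32_t out[4])
{
	int i;

	for (i = 0; i < 4; i++)
		out[i] = htonl(addrp[i]);
}
#endif
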
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	u32 pfid = bp->pfid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	u8 *mac = dev->mac_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 		 mac[4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 		 mac[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 		 mac[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 
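/* cnic_init_bnx2x_mac() programs the same MAC twice: XSTORM takes the
 * bytes in transmission order (mac[0] first), while the TSTORM copy is
 * laid out as LSB/MID/MSB pairs holding the bytes in reverse (mac[5]
 * at offset 0). Sketch of the TSTORM byte order (helper name
 * hypothetical, not part of the driver):
 */
#if 0
#include <stdint.h>

static void tstorm_mac_layout(const uint8_t mac[6], uint8_t out[6])
{
	int i;

	for (i = 0; i < 6; i++)
		out[i] = mac[5 - i];	/* mac[5] lands at offset 0 */
}
#endif
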
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 			      u32 num, int *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	struct l4_kwq_connect_req1 *kwqe1 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 		(struct l4_kwq_connect_req1 *) wqes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	struct l4_kwq_connect_req3 *kwqe3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	struct l5cm_active_conn_buffer *conn_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	struct l5cm_conn_addr_params *conn_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	union l5cm_specific_data l5_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	u32 l5_cid = kwqe1->pg_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	if (num < 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 		*work = num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 		*work = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 		*work = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	if (num < *work) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 		*work = num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 		netdev_err(dev->netdev, "conn_buf size too big\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	if (!conn_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	memset(conn_buf, 0, sizeof(*conn_buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	conn_addr = &conn_buf->conn_addr_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	conn_addr->remote_addr_0 = csk->ha[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	conn_addr->remote_addr_1 = csk->ha[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	conn_addr->remote_addr_2 = csk->ha[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	conn_addr->remote_addr_3 = csk->ha[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	conn_addr->remote_addr_4 = csk->ha[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	conn_addr->remote_addr_5 = csk->ha[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 		struct l4_kwq_connect_req2 *kwqe2 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 			(struct l4_kwq_connect_req2 *) wqes[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	conn_addr->local_tcp_port = kwqe1->src_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	conn_addr->remote_tcp_port = kwqe1->dst_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	conn_addr->pmtu = kwqe3->pmtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 			kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 
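/* An L4 connect consumes a variable number of KWQEs: req1 plus req3
 * for IPv4, with an extra req2 in between carrying the upper IPv6
 * address words when L4_KWQ_CONNECT_REQ1_IP_V6 is set; req3 is always
 * the last WQE (wqes[*work - 1]). The pseudo-header checksum and
 * keepalive parameters are filled in by cnic_init_storm_conn_bufs().
 */
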
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	union l5cm_specific_data l5_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	memset(&l5_data, 0, sizeof(l5_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	union l5cm_specific_data l5_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 	memset(&l5_data, 0, sizeof(l5_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) }

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 	struct l4_kcq kcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 	struct kcqe *cqes[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 	memset(&kcqe, 0, sizeof(kcqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	kcqe.pg_host_opaque = req->host_opaque;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	kcqe.pg_cid = req->host_opaque;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	cqes[0] = (struct kcqe *) &kcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 
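/* On bnx2x the PG offload/update KWQEs are completed entirely in the
 * driver: OFFLOAD_PG above echoes host_opaque back as both pg_cid and
 * pg_host_opaque, and UPDATE_PG below echoes the ids from the request
 * unchanged; no ramrod is submitted for either.
 */
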
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	struct l4_kcq kcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	struct kcqe *cqes[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	memset(&kcqe, 0, sizeof(kcqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	kcqe.pg_host_opaque = req->pg_host_opaque;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	kcqe.pg_cid = req->pg_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	cqes[0] = (struct kcqe *) &kcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 	struct fcoe_kwqe_stat *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	struct fcoe_stat_ramrod_params *fcoe_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	union l5cm_specific_data l5_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 	u32 cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 	req = (struct fcoe_kwqe_stat *) kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 	if (!fcoe_stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 				  FCOE_CONNECTION_TYPE, &l5_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 				 u32 num, int *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	u32 cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	struct fcoe_init_ramrod_params *fcoe_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	struct fcoe_kwqe_init1 *req1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	struct fcoe_kwqe_init2 *req2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 	struct fcoe_kwqe_init3 *req3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	union l5cm_specific_data l5_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	if (num < 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 		*work = num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 		*work = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 		*work = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 		netdev_err(dev->netdev, "fcoe_init size too big\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	if (!fcoe_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 	memset(fcoe_init, 0, sizeof(*fcoe_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	fcoe_init->sb_num = cp->status_blk_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	fcoe_init->eq_prod = MAX_KCQ_IDX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	cp->kcq2.sw_prod_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 				  FCOE_CONNECTION_TYPE, &l5_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	*work = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 
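/*
 * Offload one FCoE connection from the four OFFLOAD_CONN1..4 KWQEs
 * (*work is fixed at 4).  Allocates the connection resources, programs
 * the CDU reserved/usage words in the xstorm/ustorm aggregation
 * contexts, and submits the FCOE_RAMROD_CMD_ID_OFFLOAD_CONN ramrod.
 * Validation and allocation failures are reported back to the ULP as
 * an offload-completion KCQE with CTX_ALLOC_FAILURE status while ret
 * stays 0, so the caller's dispatch loop keeps going.
 */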
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 				 u32 num, int *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	u32 cid = -1, l5_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	struct fcoe_kwqe_conn_offload1 *req1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	struct fcoe_kwqe_conn_offload2 *req2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	struct fcoe_kwqe_conn_offload3 *req3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	struct fcoe_kwqe_conn_offload4 *req4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	struct cnic_context *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	struct fcoe_context *fctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 	struct regpair ctx_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	union l5cm_specific_data l5_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	struct fcoe_kcqe kcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 	struct kcqe *cqes[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	if (num < 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 		*work = num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	*work = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	l5_cid = req1->fcoe_conn_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	if (l5_cid >= dev->max_fcoe_conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 		goto err_reply;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	l5_cid += BNX2X_FCOE_L5_CID_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 	ctx = &cp->ctx_tbl[l5_cid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 		goto err_reply;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 		goto err_reply;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	cid = ctx->cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 	if (fctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 		u32 hw_cid = BNX2X_HW_CID(bp, cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 		u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 					     FCOE_CONNECTION_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 		fctx->xstorm_ag_context.cdu_reserved = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 					     FCOE_CONNECTION_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 		fctx->ustorm_ag_context.cdu_usage = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 		netdev_err(dev->netdev, "fcoe_offload size too big\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 		goto err_reply;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	if (!fcoe_offload)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 		goto err_reply;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	cid = BNX2X_HW_CID(bp, cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 				  FCOE_CONNECTION_TYPE, &l5_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) err_reply:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 	if (cid != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 		cnic_free_bnx2x_conn_resc(dev, l5_cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 	memset(&kcqe, 0, sizeof(kcqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 	cqes[0] = (struct kcqe *) &kcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 
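/*
 * Enable a previously offloaded FCoE connection: the enable/disable
 * KWQE is copied into the ramrod parameter block and submitted as
 * FCOE_RAMROD_CMD_ID_ENABLE_CONN.  cnic_bnx2x_fcoe_disable() below is
 * symmetric, but additionally bounds-checks the connection id.
 */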
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	struct fcoe_kwqe_conn_enable_disable *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 	union l5cm_specific_data l5_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	u32 cid, l5_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 	cid = req->context_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		netdev_err(dev->netdev, "fcoe_enable size too big\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 	if (!fcoe_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 	memset(fcoe_enable, 0, sizeof(*fcoe_enable));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 				  FCOE_CONNECTION_TYPE, &l5_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 	struct fcoe_kwqe_conn_enable_disable *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 	struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 	union l5cm_specific_data l5_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 	u32 cid, l5_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 	cid = req->context_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 	l5_cid = req->conn_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 	if (l5_cid >= dev->max_fcoe_conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 	l5_cid += BNX2X_FCOE_L5_CID_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 	if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 		netdev_err(dev->netdev, "fcoe_disable size too big\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 	fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	if (!fcoe_disable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 	memset(fcoe_disable, 0, sizeof(*fcoe_disable));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 	memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 				  FCOE_CONNECTION_TYPE, &l5_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 
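/*
 * Tear down an offloaded FCoE connection.  A TERMINATE_CONN ramrod is
 * submitted and we sleep up to CNIC_RAMROD_TMO for the completion path
 * to set ctx->wait_cond.  The context is then flagged for delayed
 * deletion, and the ULP always receives a DESTROY_CONN KCQE whose
 * status reflects whether the terminate actually completed in time.
 */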
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 	struct fcoe_kwqe_conn_destroy *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 	union l5cm_specific_data l5_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 	u32 cid, l5_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 	struct cnic_context *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	struct fcoe_kcqe kcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 	struct kcqe *cqes[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	req = (struct fcoe_kwqe_conn_destroy *) kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 	cid = req->context_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 	l5_cid = req->conn_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	if (l5_cid >= dev->max_fcoe_conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	l5_cid += BNX2X_FCOE_L5_CID_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 	ctx = &cp->ctx_tbl[l5_cid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	init_waitqueue_head(&ctx->waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 	ctx->wait_cond = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	memset(&kcqe, 0, sizeof(kcqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	memset(&l5_data, 0, sizeof(l5_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 				  FCOE_CONNECTION_TYPE, &l5_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 	if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 		if (ctx->wait_cond)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 			kcqe.completion_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 	kcqe.fcoe_conn_id = req->conn_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 	kcqe.fcoe_conn_context_id = cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 	cqes[0] = (struct kcqe *) &kcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 
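/*
 * Block until every context from start_cid onwards has finished its
 * delayed delete: first wait out CTX_FL_DELETE_WAIT, then poll up to
 * ~100 ms (5 x 20 ms) for CTX_FL_OFFLD_START to clear, warning about
 * any CID that is still offloaded afterwards.
 */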
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 	for (i = start_cid; i < cp->max_cid_space; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 		struct cnic_context *ctx = &cp->ctx_tbl[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 		int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 			msleep(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 		for (j = 0; j < 5; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 			if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 			msleep(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 			netdev_warn(dev->netdev, "CID %x not deleted\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 				   ctx->cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 
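/*
 * Function-level FCoE shutdown: wait for the FCoE contexts (they sit
 * above the iSCSI table, hence the MAX_ISCSI_TBL_SZ start index) to be
 * deleted, then submit the DESTROY_FUNC ramrod on the FCoE init CID.
 */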
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	union l5cm_specific_data l5_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 	u32 cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 	cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	memset(&l5_data, 0, sizeof(l5_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 				  FCOE_CONNECTION_TYPE, &l5_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 
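/*
 * Synthesize the completion that the hardware will never deliver for a
 * KWQE that failed to submit (typically during parity-error recovery).
 * The fabricated KCQE mirrors the failed request and carries a
 * PARITY_ERROR completion status, so the FCoE, iSCSI or L4 ULP can
 * clean up immediately instead of timing out.
 */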
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 	struct kcqe kcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 	struct kcqe *cqes[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 	u32 cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 	u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 	u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 	u32 kcqe_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 	int ulp_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	cid = kwqe->kwqe_info0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 	memset(&kcqe, 0, sizeof(kcqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 	if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 		u32 l5_cid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 		ulp_type = CNIC_ULP_FCOE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 		if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 			struct fcoe_kwqe_conn_enable_disable *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 			req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 			kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 			cid = req->context_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 			l5_cid = req->conn_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 		} else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 			kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 		kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 		kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 		kcqe.kcqe_info2 = cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 		kcqe.kcqe_info0 = l5_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 		ulp_type = CNIC_ULP_ISCSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 		if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 			cid = kwqe->kwqe_info1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 		kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 		kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 		kcqe.kcqe_info2 = cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 		struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 		ulp_type = CNIC_ULP_L4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 		if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 			kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 		else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 			kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 		else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 			kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 		kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 				    KCQE_FLAGS_LAYER_MASK_L4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 		l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 		l4kcqe->cid = cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 	cqes[0] = &kcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 	cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 
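/*
 * Dispatch a batch of iSCSI/L4 KWQEs to the per-opcode handlers.  Each
 * handler reports through 'work' how many KWQEs it consumed (1 by
 * default; more for the multi-KWQE offload and connect requests).
 * Per-KWQE failures are logged, and -EIO/-EAGAIN additionally trigger
 * a synthetic error completion via cnic_bnx2x_kwqe_err(); the batch
 * itself still returns 0 unless the device is down.
 */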
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 					 struct kwqe *wqes[], u32 num_wqes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 	int i, work, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 	u32 opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 	struct kwqe *kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 		return -EAGAIN;		/* bnx2x is down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 	for (i = 0; i < num_wqes; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 		kwqe = wqes[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 		work = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 		switch (opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 		case ISCSI_KWQE_OPCODE_INIT1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 		case ISCSI_KWQE_OPCODE_INIT2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 						     num_wqes - i, &work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 		case L4_KWQE_OPCODE_VALUE_CONNECT1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 						 &work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 		case L4_KWQE_OPCODE_VALUE_CLOSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 			ret = cnic_bnx2x_close(dev, kwqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 		case L4_KWQE_OPCODE_VALUE_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 			ret = cnic_bnx2x_reset(dev, kwqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 			ret = cnic_bnx2x_offload_pg(dev, kwqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 			ret = cnic_bnx2x_update_pg(dev, kwqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 				   opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 				   opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 			/* Possibly bnx2x parity error, send completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 			 * to ulp drivers with error code to speed up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 			 * cleanup and reset recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 			if (ret == -EIO || ret == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 				cnic_bnx2x_kwqe_err(dev, kwqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 		i += work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 
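/*
 * FCoE flavour of the KWQE dispatch loop.  FCoE offload only exists on
 * E2 and later chips, so anything older is rejected outright.
 */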
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 					struct kwqe *wqes[], u32 num_wqes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 	int i, work, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 	u32 opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 	struct kwqe *kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 		return -EAGAIN;		/* bnx2x is down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 	if (!BNX2X_CHIP_IS_E2_PLUS(bp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 	for (i = 0; i < num_wqes; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 		kwqe = wqes[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 		work = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 		switch (opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 		case FCOE_KWQE_OPCODE_INIT1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 			ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 						    num_wqes - i, &work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 		case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 			ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 						    num_wqes - i, &work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 		case FCOE_KWQE_OPCODE_ENABLE_CONN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 			ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 		case FCOE_KWQE_OPCODE_DISABLE_CONN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 			ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 		case FCOE_KWQE_OPCODE_DESTROY_CONN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 			ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 		case FCOE_KWQE_OPCODE_DESTROY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 			ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 		case FCOE_KWQE_OPCODE_STAT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 			ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 				   opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 				   opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 			/* Possibly bnx2x parity error, send completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 			 * to ulp drivers with error code to speed up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 			 * cleanup and reset recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 			if (ret == -EIO || ret == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 				cnic_bnx2x_kwqe_err(dev, kwqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 		i += work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 
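/*
 * Entry point for KWQE submission on bnx2x devices.  The layer bits of
 * the first KWQE decide where the whole batch goes: L2, L4 and iSCSI
 * share the iSCSI dispatch loop, FCoE gets its own, and any other
 * layer is -EINVAL.  Mixed-layer batches are therefore not supported.
 */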
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 				   u32 num_wqes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 	int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 	u32 layer_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 		return -EAGAIN;		/* bnx2x is down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 	if (!num_wqes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 	layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 	switch (layer_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 	case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 	case KWQE_FLAGS_LAYER_MASK_L4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 	case KWQE_FLAGS_LAYER_MASK_L2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 		ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 	case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 		ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 
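/*
 * Extract the layer bits from a KCQE op/flag word.  A completion whose
 * opcode matches FCOE_RAMROD_CMD_ID_TERMINATE_CONN is forced to the L4
 * layer, which makes service_kcqes() below route it to the L4
 * connection-manager code rather than to the FCoE ULP.
 */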
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 	if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 		return KCQE_FLAGS_LAYER_MASK_L4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 	return opflag & KCQE_FLAGS_LAYER_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 
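/*
 * Deliver completed KCQEs to their upper-layer drivers.  Consecutive
 * entries with the same layer mask are batched into a single
 * indicate_kcqes() call, made under rcu_read_lock() since ulp_ops is
 * RCU-protected; L2 entries are silently skipped.  Ramrod completions
 * are counted so the consumed SPQ credits can be returned to bnx2x at
 * the end.
 */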
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) static void service_kcqes(struct cnic_dev *dev, int num_cqes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 	int i, j, comp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 	i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 	j = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 	while (num_cqes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 		struct cnic_ulp_ops *ulp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 		int ulp_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 			comp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 		while (j < num_cqes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 				comp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 			j++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 			ulp_type = CNIC_ULP_RDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 			ulp_type = CNIC_ULP_ISCSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 			ulp_type = CNIC_ULP_FCOE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 			ulp_type = CNIC_ULP_L4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 			goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 				   kcqe_op_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 			goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 		rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 		if (likely(ulp_ops)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 						  cp->completed_kcq + i, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 		num_cqes -= j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 		i += j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 		j = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 	if (unlikely(comp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 
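/*
 * Harvest new entries from a kernel completion queue.  Walks from the
 * software producer index to the hardware producer, storing KCQE
 * pointers in cp->completed_kcq[] (at most MAX_COMPLETED_KCQE per
 * call).  Only entries up to the last one without KCQE_FLAGS_NEXT are
 * counted, so a multi-entry KCQE is never handed over half-complete.
 */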
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 	u16 i, ri, hw_prod, last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 	struct kcqe *kcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 	int kcqe_cnt = 0, last_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 	i = ri = last = info->sw_prod_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 	ri &= MAX_KCQ_IDX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 	hw_prod = *info->hw_prod_idx_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 	hw_prod = info->hw_idx(hw_prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 		cp->completed_kcq[kcqe_cnt++] = kcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 		i = info->next_idx(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 		ri = i & MAX_KCQ_IDX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 			last_cnt = kcqe_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 			last = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 	info->sw_prod_idx = last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 	return last_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 
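/*
 * Scan the L2 receive completion ring (the third page of the UIO ring
 * buffer) for ramrod CQEs and count CLIENT_SETUP/HALT completions.
 * Only meaningful on bnx2x-class devices; anything else returns 0.
 */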
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) static int cnic_l2_completion(struct cnic_local *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 	u16 hw_cons, sw_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 	struct cnic_uio_dev *udev = cp->udev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 					(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 	u32 cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 	int comp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 	hw_cons = *cp->rx_cons_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 		hw_cons++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 	sw_cons = cp->rx_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 	while (sw_cons != hw_cons) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 		u8 cqe_fp_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 			    cmd == RAMROD_CMD_ID_ETH_HALT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 				comp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 	return comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 
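/*
 * Watch the UIO-managed L2 rings for activity.  When a consumer index
 * moves, the cached copies are updated and userspace is woken through
 * uio_event_notify().  While a ring shutdown is pending (L2_WAIT set),
 * cnic_l2_completion() is polled so the wait flag can be cleared once
 * the halt/setup ramrods have completed.
 */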
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) static void cnic_chk_pkt_rings(struct cnic_local *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 	u16 rx_cons, tx_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 	int comp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 	rx_cons = *cp->rx_cons_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 	tx_cons = *cp->tx_cons_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 			comp = cnic_l2_completion(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 		cp->tx_cons = tx_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 		cp->rx_cons = rx_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 		if (cp->udev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 			uio_event_notify(&cp->udev->cnic_uinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 	if (comp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 	int kcqe_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 	/* status block index must be read before reading other fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 	rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 		service_kcqes(dev, kcqe_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 		/* Tell compiler that status_blk fields can change. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 		barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 		status_idx = (u16) *cp->kcq1.status_idx_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 		/* status block index must be read first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 		rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 		cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 	cnic_chk_pkt_rings(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 	return status_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) static int cnic_service_bnx2(void *data, void *status_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 	struct cnic_dev *dev = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 		struct status_block *sblk = status_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 		return sblk->status_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 	return cnic_service_bnx2_queues(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) static void cnic_service_bnx2_msix(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 	struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 	struct cnic_dev *dev = cp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 	cp->last_status_idx = cnic_service_bnx2_queues(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 
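/*
 * Common interrupt bottom-half kick: prefetch the status block and the
 * next expected KCQ entry to warm the cache, then punt the real work
 * to the cnic_irq_task tasklet.
 */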
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) static void cnic_doirq(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 		u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 		prefetch(cp->status_blk.gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 		tasklet_schedule(&cp->cnic_irq_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) static irqreturn_t cnic_irq(int irq, void *dev_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 	struct cnic_dev *dev = dev_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 	if (cp->ack_int)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 		cp->ack_int(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 	cnic_doirq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 
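/*
 * Acknowledge a status block through the legacy host-coalescing (HC)
 * command register.  The *_e2_* variants further down write the IGU
 * ack register directly instead, and appear to be the ones wired up
 * on E2 and later chips.
 */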
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 				      u16 index, u8 op, u8 update)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 		       COMMAND_REG_INT_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 	struct igu_ack_register igu_ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 	igu_ack.status_block_index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 	igu_ack.sb_id_and_flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 			    u16 index, u8 op, u8 update)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 	struct igu_regular cmd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 	cmd_data.sb_id_and_flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 		(index << IGU_REGULAR_SB_INDEX_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 		(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 		(update << IGU_REGULAR_BUPDATE_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 		(op << IGU_REGULAR_ENABLE_INT_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 	CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 			   IGU_INT_DISABLE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 			IGU_INT_DISABLE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 			   IGU_INT_ENABLE, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 			IGU_INT_ENABLE, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 
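/* Drain the KCQ until the status block index stops moving.  Each rmb()
 * orders the read of the index against the KCQEs it advertises, matching
 * the order in which the hardware DMAs the status block.
 */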
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 	u32 last_status = *info->status_idx_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 	int kcqe_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 	/* status block index must be read before reading the KCQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 	rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 		service_kcqes(dev, kcqe_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 		/* Tell compiler that sblk fields can change. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 		barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 		last_status = *info->status_idx_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 		/* status block index must be read before reading the KCQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 		rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 	return last_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 
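/* Tasklet bottom half for bnx2x devices.  kcq1 is always serviced; on
 * FCoE-capable devices kcq2 is drained as well, and the IGU is re-armed
 * only once both queues have been seen at the same status index.
 */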
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) static void cnic_service_bnx2x_bh(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 	struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 	struct cnic_dev *dev = cp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 	u32 status_idx, new_status_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 		CNIC_WR16(dev, cp->kcq1.io_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 			  cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 		if (!CNIC_SUPPORTS_FCOE(bp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 			cp->arm_int(dev, status_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 		new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 		if (new_status_idx != status_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 		CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 			  MAX_KCQ_IDX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 				status_idx, IGU_INT_ENABLE, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) static int cnic_service_bnx2x(void *data, void *status_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 	struct cnic_dev *dev = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 		cnic_doirq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 	cnic_chk_pkt_rings(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 
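/* Stop one ULP.  ULP_F_CALL_PENDING marks the cnic_stop() callback as
 * in flight so the unregister path can wait for it instead of freeing
 * ulp_ops underneath us; the ops pointer itself is sampled under
 * cnic_lock.
 */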
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 	struct cnic_ulp_ops *ulp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 	if (if_type == CNIC_ULP_ISCSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 	mutex_lock(&cnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 	ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 					    lockdep_is_held(&cnic_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 	if (!ulp_ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 		mutex_unlock(&cnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 	set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 	mutex_unlock(&cnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 	if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 		ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 	clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) static void cnic_ulp_stop(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 	int if_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 		cnic_ulp_stop_one(cp, if_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) static void cnic_ulp_start(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 	int if_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 		struct cnic_ulp_ops *ulp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 		mutex_lock(&cnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 						    lockdep_is_held(&cnic_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 		if (!ulp_ops || !ulp_ops->cnic_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 			mutex_unlock(&cnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 		mutex_unlock(&cnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 			ulp_ops->cnic_start(cp->ulp_handle[if_type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 	struct cnic_ulp_ops *ulp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 	mutex_lock(&cnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 	ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 					    lockdep_is_held(&cnic_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 	if (ulp_ops && ulp_ops->cnic_get_stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 		rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 		rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 	mutex_unlock(&cnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 
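/* Control entry point invoked by the underlying bnx2/bnx2x driver via
 * struct cnic_ops: start/stop the hardware, complete CFC deletes and
 * collect iSCSI/FCoE statistics.
 */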
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) static int cnic_ctl(void *data, struct cnic_ctl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 	struct cnic_dev *dev = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 	int ulp_type = CNIC_ULP_ISCSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 	switch (info->cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 	case CNIC_CTL_STOP_CMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 		cnic_hold(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 		cnic_ulp_stop(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 		cnic_stop_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 		cnic_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 	case CNIC_CTL_START_CMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 		cnic_hold(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 		if (!cnic_start_hw(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 			cnic_ulp_start(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 		cnic_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 	case CNIC_CTL_STOP_ISCSI_CMD: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 		struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 		set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 		queue_delayed_work(cnic_wq, &cp->delete_task, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 	case CNIC_CTL_COMPLETION_CMD: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 		struct cnic_ctl_completion *comp = &info->data.comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 		u32 cid = BNX2X_SW_CID(comp->cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 		u32 l5_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 		struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 		if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 			if (unlikely(comp->error)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 				set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 				netdev_err(dev->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 					   "CID %x CFC delete comp error %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 					   cid, comp->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 			ctx->wait_cond = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 			wake_up(&ctx->waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) 	case CNIC_CTL_FCOE_STATS_GET_CMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 		ulp_type = CNIC_ULP_FCOE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 	case CNIC_CTL_ISCSI_STATS_GET_CMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 		cnic_hold(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 		cnic_copy_ulp_stats(dev, ulp_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) 		cnic_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) static void cnic_ulp_init(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 		struct cnic_ulp_ops *ulp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 		mutex_lock(&cnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 		ulp_ops = cnic_ulp_tbl_prot(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 		if (!ulp_ops || !ulp_ops->cnic_init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 			mutex_unlock(&cnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) 		ulp_get(ulp_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 		mutex_unlock(&cnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) 			ulp_ops->cnic_init(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 		ulp_put(ulp_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) static void cnic_ulp_exit(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) 	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 		struct cnic_ulp_ops *ulp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 		mutex_lock(&cnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 		ulp_ops = cnic_ulp_tbl_prot(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 		if (!ulp_ops || !ulp_ops->cnic_exit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 			mutex_unlock(&cnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 		ulp_get(ulp_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 		mutex_unlock(&cnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 			ulp_ops->cnic_exit(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 		ulp_put(ulp_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 
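/* Connection-manager KWQE builders follow.  OFFLOAD_PG hands the L2 path
 * ("PG") of a connection to the firmware: destination and source MACs,
 * an optional VLAN tag, and the l5_cid echoed back as host_opaque in the
 * completion.
 */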
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) static int cnic_cm_offload_pg(struct cnic_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 	struct cnic_dev *dev = csk->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 	struct l4_kwq_offload_pg *l4kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 	struct kwqe *wqes[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 	memset(l4kwqe, 0, sizeof(*l4kwqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 	wqes[0] = (struct kwqe *) l4kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 	l4kwqe->flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 	l4kwqe->l2hdr_nbytes = ETH_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 	l4kwqe->da0 = csk->ha[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 	l4kwqe->da1 = csk->ha[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 	l4kwqe->da2 = csk->ha[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 	l4kwqe->da3 = csk->ha[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 	l4kwqe->da4 = csk->ha[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 	l4kwqe->da5 = csk->ha[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 	l4kwqe->sa0 = dev->mac_addr[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 	l4kwqe->sa1 = dev->mac_addr[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 	l4kwqe->sa2 = dev->mac_addr[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 	l4kwqe->sa3 = dev->mac_addr[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 	l4kwqe->sa4 = dev->mac_addr[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 	l4kwqe->sa5 = dev->mac_addr[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 	l4kwqe->etype = ETH_P_IP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 	l4kwqe->ipid_start = DEF_IPID_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 	l4kwqe->host_opaque = csk->l5_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 	if (csk->vlan_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 		l4kwqe->vlan_tag = csk->vlan_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 		l4kwqe->l2hdr_nbytes += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 	return dev->submit_kwqes(dev, wqes, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) static int cnic_cm_update_pg(struct cnic_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 	struct cnic_dev *dev = csk->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 	struct l4_kwq_update_pg *l4kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 	struct kwqe *wqes[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 	memset(l4kwqe, 0, sizeof(*l4kwqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) 	wqes[0] = (struct kwqe *) l4kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) 	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 	l4kwqe->flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) 	l4kwqe->pg_cid = csk->pg_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 	l4kwqe->da0 = csk->ha[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 	l4kwqe->da1 = csk->ha[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 	l4kwqe->da2 = csk->ha[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 	l4kwqe->da3 = csk->ha[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 	l4kwqe->da4 = csk->ha[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 	l4kwqe->da5 = csk->ha[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 	l4kwqe->pg_host_opaque = csk->l5_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 	return dev->submit_kwqes(dev, wqes, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) static int cnic_cm_upload_pg(struct cnic_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 	struct cnic_dev *dev = csk->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 	struct l4_kwq_upload *l4kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 	struct kwqe *wqes[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 	memset(l4kwqe, 0, sizeof(*l4kwqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) 	wqes[0] = (struct kwqe *) l4kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 	l4kwqe->flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 	l4kwqe->cid = csk->pg_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 	return dev->submit_kwqes(dev, wqes, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 
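/* Build the CONNECT request as a chain of two KWQEs for IPv4 or three
 * for IPv6: req1 carries the ports and low 32 bits of each address,
 * req2 (IPv6 only) the remaining 96 bits, and req3 the keepalive/TCP
 * options plus an MSS derived from the path MTU.
 */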
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) static int cnic_cm_conn_req(struct cnic_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 	struct cnic_dev *dev = csk->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 	struct l4_kwq_connect_req1 *l4kwqe1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 	struct l4_kwq_connect_req2 *l4kwqe2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 	struct l4_kwq_connect_req3 *l4kwqe3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 	struct kwqe *wqes[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 	u8 tcp_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 	int num_wqes = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 	memset(l4kwqe3, 0, sizeof(*l4kwqe3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 	l4kwqe3->flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 	l4kwqe3->ka_timeout = csk->ka_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 	l4kwqe3->ka_interval = csk->ka_interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) 	l4kwqe3->tos = csk->tos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 	l4kwqe3->ttl = csk->ttl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 	l4kwqe3->pmtu = csk->mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 	l4kwqe3->rcv_buf = csk->rcv_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 	l4kwqe3->snd_buf = csk->snd_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 	l4kwqe3->seed = csk->seed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 	wqes[0] = (struct kwqe *) l4kwqe1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 	if (test_bit(SK_F_IPV6, &csk->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 		wqes[1] = (struct kwqe *) l4kwqe2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 		wqes[2] = (struct kwqe *) l4kwqe3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 		num_wqes = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 		l4kwqe2->flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 			       sizeof(struct tcphdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 		wqes[1] = (struct kwqe *) l4kwqe3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 			       sizeof(struct tcphdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 	l4kwqe1->flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 	l4kwqe1->cid = csk->cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 	l4kwqe1->pg_cid = csk->pg_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 	if (csk->tcp_flags & SK_TCP_NAGLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 	if (csk->tcp_flags & SK_TCP_SACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) 		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 	l4kwqe1->tcp_flags = tcp_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 	return dev->submit_kwqes(dev, wqes, num_wqes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) static int cnic_cm_close_req(struct cnic_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 	struct cnic_dev *dev = csk->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 	struct l4_kwq_close_req *l4kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 	struct kwqe *wqes[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 	memset(l4kwqe, 0, sizeof(*l4kwqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 	wqes[0] = (struct kwqe *) l4kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) 	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) 	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) 	l4kwqe->cid = csk->cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 	return dev->submit_kwqes(dev, wqes, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) static int cnic_cm_abort_req(struct cnic_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 	struct cnic_dev *dev = csk->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) 	struct l4_kwq_reset_req *l4kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) 	struct kwqe *wqes[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) 	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 	memset(l4kwqe, 0, sizeof(*l4kwqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 	wqes[0] = (struct kwqe *) l4kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 	l4kwqe->cid = csk->cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 	return dev->submit_kwqes(dev, wqes, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 
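/* Claim the cnic_sock slot for l5_cid: -EAGAIN while a previous offload
 * on the cid is still winding down or the slot is still referenced,
 * -EBUSY if it is already marked in use.  Keepalive, TOS/TTL and buffer
 * defaults come from the DEF_* constants.
 */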
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 			  u32 l5_cid, struct cnic_sock **csk, void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) 	struct cnic_sock *csk1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 	if (l5_cid >= MAX_CM_SK_TBL_SZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 	if (cp->ctx_tbl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) 		struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) 			return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 	csk1 = &cp->csk_tbl[l5_cid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 	if (atomic_read(&csk1->ref_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) 	csk1->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 	csk1->cid = cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 	csk1->l5_cid = l5_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) 	csk1->ulp_type = ulp_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 	csk1->context = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 	csk1->ka_timeout = DEF_KA_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 	csk1->ka_interval = DEF_KA_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) 	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 	csk1->tos = DEF_TOS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) 	csk1->ttl = DEF_TTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) 	csk1->rcv_buf = DEF_RCV_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) 	csk1->snd_buf = DEF_SND_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) 	csk1->seed = DEF_SEED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) 	csk1->tcp_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) 	*csk = csk1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) static void cnic_cm_cleanup(struct cnic_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 	if (csk->src_port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) 		struct cnic_dev *dev = csk->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 		struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) 		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) 		csk->src_port = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) static void cnic_close_conn(struct cnic_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 		cnic_cm_upload_pg(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 	cnic_cm_cleanup(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 
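/* Tear down a socket claimed by cnic_cm_create(): clear SK_F_INUSE, wait
 * for every other reference to drop (our own csk_hold() keeps ref_count
 * at 1), then release the bound source port.
 */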
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) static int cnic_cm_destroy(struct cnic_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) 	if (!cnic_in_use(csk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 	csk_hold(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 	clear_bit(SK_F_INUSE, &csk->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 	smp_mb__after_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 	while (atomic_read(&csk->ref_count) != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 		msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 	cnic_cm_cleanup(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) 	csk->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 	csk_put(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) static inline u16 cnic_get_vlan(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) 				struct net_device **vlan_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) 	if (is_vlan_dev(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) 		*vlan_dev = vlan_dev_real_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) 		return vlan_dev_vlan_id(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 	*vlan_dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 
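/* The route lookups below are done in init_net only; connections from
 * other network namespaces are not supported here.
 */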
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 			     struct dst_entry **dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) #if defined(CONFIG_INET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 	struct rtable *rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) 	rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 	if (!IS_ERR(rt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) 		*dst = &rt->dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 	return PTR_ERR(rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) 	return -ENETUNREACH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) 			     struct dst_entry **dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) #if IS_ENABLED(CONFIG_IPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) 	struct flowi6 fl6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) 	memset(&fl6, 0, sizeof(fl6));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) 	fl6.daddr = dst_addr->sin6_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) 	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) 		fl6.flowi6_oif = dst_addr->sin6_scope_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) 	*dst = ip6_route_output(&init_net, NULL, &fl6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) 	if ((*dst)->error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) 		dst_release(*dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) 		*dst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) 		return -ENETUNREACH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) 	return -ENETUNREACH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 
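/* Map a destination address to the cnic device behind the egress netdev
 * (after peeling any VLAN); ULPs use this to pick the NIC for a new
 * session.
 */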
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 					   int ulp_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 	struct cnic_dev *dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) 	struct dst_entry *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) 	struct net_device *netdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 	int err = -ENETUNREACH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) 	if (dst_addr->sin_family == AF_INET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) 		err = cnic_get_v4_route(dst_addr, &dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) 	else if (dst_addr->sin_family == AF_INET6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) 		struct sockaddr_in6 *dst_addr6 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) 			(struct sockaddr_in6 *) dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) 		err = cnic_get_v6_route(dst_addr6, &dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) 	if (!dst->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 	cnic_get_vlan(dst->dev, &netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) 	dev = cnic_from_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) 	dst_release(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) 	if (dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) 		cnic_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) 	return dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) 	struct cnic_dev *dev = csk->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) 	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) 
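/* Resolve the socket's L3/L4 identity: destination address and port from
 * @saddr, VLAN and MTU from the routed device when it maps back to our
 * netdev, and a source port reserved in csk_port_tbl - the requested
 * port if it falls inside [CNIC_LOCAL_PORT_MIN, CNIC_LOCAL_PORT_MAX),
 * otherwise a freshly allocated one.
 */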
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) 	struct cnic_dev *dev = csk->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) 	int is_v6, rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) 	struct dst_entry *dst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) 	struct net_device *realdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) 	__be16 local_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) 	u32 port_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) 	if (saddr->local.v6.sin6_family == AF_INET6 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) 	    saddr->remote.v6.sin6_family == AF_INET6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) 		is_v6 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) 	else if (saddr->local.v4.sin_family == AF_INET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) 		 saddr->remote.v4.sin_family == AF_INET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) 		is_v6 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) 	clear_bit(SK_F_IPV6, &csk->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) 	if (is_v6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) 		set_bit(SK_F_IPV6, &csk->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) 		cnic_get_v6_route(&saddr->remote.v6, &dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) 		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) 		       sizeof(struct in6_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) 		csk->dst_port = saddr->remote.v6.sin6_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) 		local_port = saddr->local.v6.sin6_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) 		cnic_get_v4_route(&saddr->remote.v4, &dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) 		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) 		csk->dst_port = saddr->remote.v4.sin_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) 		local_port = saddr->local.v4.sin_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) 	csk->vlan_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) 	csk->mtu = dev->netdev->mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) 	if (dst && dst->dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) 		u16 vlan = cnic_get_vlan(dst->dev, &realdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) 		if (realdev == dev->netdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) 			csk->vlan_id = vlan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) 			csk->mtu = dst_mtu(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) 	port_id = be16_to_cpu(local_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) 	if (port_id >= CNIC_LOCAL_PORT_MIN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) 	    port_id < CNIC_LOCAL_PORT_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) 		if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) 			port_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) 		port_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) 	if (!port_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) 		port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) 		if (port_id == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) 			rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) 			goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) 		local_port = cpu_to_be16(port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) 	csk->src_port = local_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) 	dst_release(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) static void cnic_init_csk_state(struct cnic_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) 	csk->state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) 	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) 	clear_bit(SK_F_CLOSING, &csk->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) 	struct cnic_local *cp = csk->dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) 	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) 	if (!cnic_in_use(csk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) 	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) 	cnic_init_csk_state(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) 	err = cnic_get_route(csk, saddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) 	err = cnic_resolve_addr(csk, saddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) 	if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) 	clear_bit(SK_F_CONNECT_START, &csk->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) static int cnic_cm_abort(struct cnic_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) 	struct cnic_local *cp = csk->dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) 	u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) 	if (!cnic_in_use(csk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) 	if (cnic_abort_prep(csk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) 		return cnic_cm_abort_req(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) 	/* Getting here means that we haven't started connect, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) 	 * connect was not successful, or it has been reset by the target.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) 	cp->close_conn(csk, opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) 	if (csk->state != opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) 		/* Wait for remote reset sequence to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) 		while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) 			msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) 		return -EALREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) static int cnic_cm_close(struct cnic_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) 	if (!cnic_in_use(csk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) 	if (cnic_close_prep(csk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) 		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) 		return cnic_cm_close_req(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) 		/* Wait for remote reset sequence to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) 		while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) 			msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) 		return -EALREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) 			   u8 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) 	struct cnic_ulp_ops *ulp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) 	int ulp_type = csk->ulp_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) 	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) 	if (ulp_ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) 		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) 			ulp_ops->cm_connect_complete(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) 		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) 			ulp_ops->cm_close_complete(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) 		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) 			ulp_ops->cm_remote_abort(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) 		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) 			ulp_ops->cm_abort_complete(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) 		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) 			ulp_ops->cm_remote_close(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) 
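/* Illustrative sketch (not part of this driver): a ULP such as bnx2i
 * receives the upcalls above through the cm_* members of the
 * struct cnic_ulp_ops it registers with cnic.  The handler names below
 * are hypothetical.
 */
#if 0
static void example_cm_connect_complete(struct cnic_sock *csk)
{
	/* offload finished; the ULP may start sending on csk */
}

static void example_cm_remote_close(struct cnic_sock *csk)
{
	/* peer closed the connection; the ULP begins its own teardown */
}

static struct cnic_ulp_ops example_ulp_ops = {
	.cm_connect_complete	= example_cm_connect_complete,
	.cm_remote_close	= example_cm_remote_close,
};
#endif
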
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) static int cnic_cm_set_pg(struct cnic_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) 	if (cnic_offld_prep(csk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) 		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) 			cnic_cm_update_pg(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) 			cnic_cm_offload_pg(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) 	u32 l5_cid = kcqe->pg_host_opaque;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) 	u8 opcode = kcqe->op_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) 	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) 	csk_hold(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) 	if (!cnic_in_use(csk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) 	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) 		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) 	/* Possible PG kcqe status:  SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) 	if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) 		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) 		cnic_cm_upcall(cp, csk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) 			       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) 	csk->pg_cid = kcqe->pg_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) 	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) 	cnic_cm_conn_req(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) 	csk_put(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) 	struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) 	u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) 	ctx->timestamp = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) 	ctx->wait_cond = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) 	wake_up(&ctx->waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) 	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) 	u8 opcode = l4kcqe->op_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) 	u32 l5_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) 	struct cnic_sock *csk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) 	if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) 		cnic_process_fcoe_term_conn(dev, kcqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) 	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) 	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) 		cnic_cm_process_offld_pg(dev, l4kcqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) 	l5_cid = l4kcqe->conn_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) 	if (opcode & 0x80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) 		l5_cid = l4kcqe->cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) 	if (l5_cid >= MAX_CM_SK_TBL_SZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) 	csk = &cp->csk_tbl[l5_cid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) 	csk_hold(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) 	if (!cnic_in_use(csk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) 		csk_put(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) 
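	/* Dispatch on the completion opcode: close/reset ramrod completions
	 * feed the teardown state machine through cp->close_conn(), while
	 * connect results and remote-close events are relayed to the ULP
	 * through cnic_cm_upcall().
	 */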
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) 	switch (opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) 	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) 		if (l4kcqe->status != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) 			clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) 			cnic_cm_upcall(cp, csk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) 				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) 	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) 		if (l4kcqe->status == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) 			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) 		else if (l4kcqe->status ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) 			 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) 			set_bit(SK_F_HW_ERR, &csk->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) 		smp_mb__before_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) 		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) 		cnic_cm_upcall(cp, csk, opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) 	case L5CM_RAMROD_CMD_ID_CLOSE: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) 		struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) 		if (l4kcqe->status == 0 && l5kcqe->completion_status == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) 		netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) 			    l4kcqe->status, l5kcqe->completion_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) 		opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) 	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) 	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) 	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) 	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) 	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) 		if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) 			set_bit(SK_F_HW_ERR, &csk->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) 		cp->close_conn(csk, opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) 	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) 		/* after we already sent CLOSE_REQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) 		if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) 		    !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) 		    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) 			cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) 			cnic_cm_upcall(cp, csk, opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) 	csk_put(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) 	struct cnic_dev *dev = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) 	for (i = 0; i < num; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) 		cnic_cm_process_kcqe(dev, kcqe[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) static struct cnic_ulp_ops cm_ulp_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) 	.indicate_kcqes		= cnic_cm_indicate_kcqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) static void cnic_cm_free_mem(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) 	kvfree(cp->csk_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) 	cp->csk_tbl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) 	cnic_free_id_tbl(&cp->csk_port_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) static int cnic_cm_alloc_mem(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) 	u32 port_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) 	cp->csk_tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) 			       GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) 	if (!cp->csk_tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) 	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) 		atomic_set(&cp->csk_tbl[i].ref_count, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) 	port_id = prandom_u32();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) 	port_id %= CNIC_LOCAL_PORT_RANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) 	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) 			     CNIC_LOCAL_PORT_MIN, port_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) 		cnic_cm_free_mem(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) 
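/* Illustrative note: the id table initialized above hands out local TCP
 * source ports from a CNIC_LOCAL_PORT_RANGE-sized window starting at
 * CNIC_LOCAL_PORT_MIN, with the scan beginning at a random offset so the
 * port sequence differs across driver reloads.  Sketch of use (the real
 * allocation happens later, in the connect path):
 *
 *	u32 port = cnic_alloc_new_id(&cp->csk_port_tbl);
 *
 *	if (port == -1)
 *		return -ENOMEM;
 *	csk->src_port = htons(port);
 *
 * and on teardown:
 *
 *	cnic_free_id(&cp->csk_port_tbl, port);
 */
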
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) 	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) 		/* Unsolicited RESET_COMP or RESET_RECEIVED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) 		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) 		csk->state = opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) 	/* 1. If event opcode matches the expected event in csk->state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) 	 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) 	 *    event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) 	 * 3. If the expected event is 0, meaning the connection was never
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) 	 *    established, we accept the opcode from cm_abort.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) 	if (opcode == csk->state || csk->state == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) 	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) 	    csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) 		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) 			if (csk->state == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) 				csk->state = opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) 
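/* Worked example of rule 3 above: a connection that never completed
 * offload still has csk->state == 0, so the RESET_COMP opcode passed
 * down from cnic_cm_abort() via cp->close_conn() is adopted as the
 * closing state and the teardown proceeds.
 */
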
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) 	struct cnic_dev *dev = csk->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) 	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) 		cnic_cm_upcall(cp, csk, opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) 	clear_bit(SK_F_CONNECT_START, &csk->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) 	cnic_close_conn(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) 	csk->state = opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) 	cnic_cm_upcall(cp, csk, opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) {
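	/* Intentionally empty: the bnx2 connection manager needs no
	 * per-device stop work.
	 */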
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) 	u32 seed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) 
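	/* Write a random seed into context word 45, presumably feeding the
	 * chip's TCP initial-sequence-number generation (an inference; the
	 * location is not documented in this file).
	 */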
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) 	seed = prandom_u32();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) 	cnic_ctx_wr(dev, 45, 0, seed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) 	struct cnic_dev *dev = csk->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) 	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) 	union l5cm_specific_data l5_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) 	u32 cmd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) 	int close_complete = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) 	switch (opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) 	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) 	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) 	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) 		if (cnic_ready_to_close(csk, opcode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) 			if (test_bit(SK_F_HW_ERR, &csk->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) 				close_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) 			else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) 				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) 				close_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) 	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) 		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) 	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) 		close_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) 	if (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) 		memset(&l5_data, 0, sizeof(l5_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) 		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) 				    &l5_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) 	} else if (close_complete) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) 		ctx->timestamp = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) 		cnic_close_conn(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) 		cnic_cm_upcall(cp, csk, csk->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) 	if (!cp->ctx_tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) 	if (!netif_running(dev->netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) 	cnic_bnx2x_delete_wait(dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) 	cancel_delayed_work(&cp->delete_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) 	flush_workqueue(cnic_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) 	if (atomic_read(&cp->iscsi_conn) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) 		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) 			    atomic_read(&cp->iscsi_conn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) 	u32 pfid = bp->pfid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) 	u32 port = BP_PORT(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) 	cnic_init_bnx2x_mac(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) 	cnic_bnx2x_set_tcp_options(dev, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) 		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) 	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) 		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) 	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) 		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) 		DEF_MAX_DA_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) 		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) 		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) 		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) 	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) 		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) 	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) 		DEF_MAX_CWND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) static void cnic_delete_task(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) 	struct cnic_local *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) 	struct cnic_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) 	u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) 	int need_resched = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) 	cp = container_of(work, struct cnic_local, delete_task.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) 	dev = cp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) 	if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) 		struct drv_ctl_info info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) 		cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) 		memset(&info, 0, sizeof(struct drv_ctl_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) 		info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) 		cp->ethdev->drv_ctl(dev->netdev, &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) 
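	/* Each context gets a ~2 second grace period after its last
	 * recorded completion; younger entries are skipped and retried
	 * when the work requeues itself (every 10 ms) below.
	 */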
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) 	for (i = 0; i < cp->max_cid_space; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) 		struct cnic_context *ctx = &cp->ctx_tbl[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) 		int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) 		if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) 		    !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) 		if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) 			need_resched = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) 		if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) 		err = cnic_bnx2x_destroy_ramrod(dev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) 		cnic_free_bnx2x_conn_resc(dev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) 		if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) 			if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) 				atomic_dec(&cp->iscsi_conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) 			clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) 	if (need_resched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) 		queue_delayed_work(cnic_wq, &cp->delete_task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) 				   msecs_to_jiffies(10));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) static int cnic_cm_open(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) 	err = cnic_cm_alloc_mem(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) 	err = cp->start_cm(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) 	INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) 	dev->cm_create = cnic_cm_create;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) 	dev->cm_destroy = cnic_cm_destroy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) 	dev->cm_connect = cnic_cm_connect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) 	dev->cm_abort = cnic_cm_abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) 	dev->cm_close = cnic_cm_close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) 	dev->cm_select_dev = cnic_cm_select_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) 	cp->ulp_handle[CNIC_ULP_L4] = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) 	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) 	cnic_cm_free_mem(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) static int cnic_cm_shutdown(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) 	if (!cp->csk_tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) 	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) 		struct cnic_sock *csk = &cp->csk_tbl[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) 		clear_bit(SK_F_INUSE, &csk->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) 		cnic_cm_cleanup(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) 	cnic_cm_free_mem(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) static void cnic_init_context(struct cnic_dev *dev, u32 cid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) 	u32 cid_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) 	cid_addr = GET_CID_ADDR(cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) 	for (i = 0; i < CTX_SIZE; i += 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) 		cnic_ctx_wr(dev, cid_addr, i, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) 	int ret = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) 	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) 	if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) 	for (i = 0; i < cp->ctx_blks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) 		int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) 		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) 		u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) 		memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) 		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) 			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) 		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) 			(u64) cp->ctx_arr[i].mapping >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) 		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) 			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) 		for (j = 0; j < 10; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) 			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) 			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) 			udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) 		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) 			ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) 
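/* The register poll in cnic_setup_5709_context() could be factored out;
 * a behavior-equivalent sketch (hypothetical helper, not in the driver):
 */
#if 0
static int cnic_wait_page_tbl_write(struct cnic_dev *dev)
{
	int j;

	for (j = 0; j < 10; j++) {
		u32 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);

		if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
			return 0;
		udelay(5);
	}
	return -EBUSY;
}
#endif
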
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) static void cnic_free_irq(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) 	struct cnic_eth_dev *ethdev = cp->ethdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) 		cp->disable_int_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) 		tasklet_kill(&cp->cnic_irq_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) 		free_irq(ethdev->irq_arr[0].vector, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) static int cnic_request_irq(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) 	struct cnic_eth_dev *ethdev = cp->ethdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) 	err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) 		tasklet_disable(&cp->cnic_irq_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) static int cnic_init_bnx2_irq(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) 	struct cnic_eth_dev *ethdev = cp->ethdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) 		int err, i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) 		int sblk_num = cp->status_blk_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) 		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) 			   BNX2_HC_SB_CONFIG_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) 		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) 		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) 		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) 		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) 		cp->last_status_idx = cp->status_blk.bnx2->status_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) 		tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2_msix);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) 		err = cnic_request_irq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) 
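		/* Nudge the HC "coalesce now" doorbell until the chip posts
		 * a status block with the completion producer index back at
		 * zero.
		 */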
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) 		while (cp->status_blk.bnx2->status_completion_producer_index &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) 		       i < 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) 			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) 				1 << (11 + sblk_num));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) 			udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) 			i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) 			barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) 		if (cp->status_blk.bnx2->status_completion_producer_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) 			cnic_free_irq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) 			goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) 		struct status_block *sblk = cp->status_blk.gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) 		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) 		int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) 		while (sblk->status_completion_producer_index && i < 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) 			CNIC_WR(dev, BNX2_HC_COMMAND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) 				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) 			udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) 			i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) 			barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) 		if (sblk->status_completion_producer_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) 			goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) 	netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) 	return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) static void cnic_enable_bnx2_int(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) 	struct cnic_eth_dev *ethdev = cp->ethdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) 	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) 	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) 		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) 	struct cnic_eth_dev *ethdev = cp->ethdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) 	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) 	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) 	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) 	synchronize_irq(ethdev->irq_arr[0].vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) 	struct cnic_eth_dev *ethdev = cp->ethdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) 	struct cnic_uio_dev *udev = cp->udev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) 	u32 cid_addr, tx_cid, sb_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) 	u32 val, offset0, offset1, offset2, offset3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) 	struct bnx2_tx_bd *txbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) 	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) 	struct status_block *s_blk = cp->status_blk.gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) 	sb_id = cp->status_blk_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) 	tx_cid = 20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) 	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) 		struct status_block_msix *sblk = cp->status_blk.bnx2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) 		tx_cid = TX_TSS_CID + sb_id - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) 		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) 			(TX_TSS_CID << 7));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) 		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) 	cp->tx_cons = *cp->tx_cons_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) 	cid_addr = GET_CID_ADDR(tx_cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) 	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) 		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) 		for (i = 0; i < PHY_CTX_SIZE; i += 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) 			cnic_ctx_wr(dev, cid_addr2, i, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) 		offset0 = BNX2_L2CTX_TYPE_XI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) 		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) 		cnic_init_context(dev, tx_cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) 		cnic_init_context(dev, tx_cid + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) 		offset0 = BNX2_L2CTX_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) 		offset1 = BNX2_L2CTX_CMD_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) 	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) 	cnic_ctx_wr(dev, cid_addr, offset0, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) 	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) 	cnic_ctx_wr(dev, cid_addr, offset1, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) 	txbd = udev->l2_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) 	buf_map = udev->l2_buf_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) 	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i++, txbd++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) 		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) 		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) 	}
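	/* txbd now points at the last BD of the page, which carries no
	 * data; it is written below with the ring's own base address so
	 * the single page chains back onto itself.
	 */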
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) 	val = (u64) ring_map >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) 	cnic_ctx_wr(dev, cid_addr, offset2, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) 	txbd->tx_bd_haddr_hi = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) 	val = (u64) ring_map & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) 	cnic_ctx_wr(dev, cid_addr, offset3, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) 	txbd->tx_bd_haddr_lo = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) 	struct cnic_eth_dev *ethdev = cp->ethdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) 	struct cnic_uio_dev *udev = cp->udev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) 	u32 cid_addr, sb_id, val, coal_reg, coal_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) 	struct bnx2_rx_bd *rxbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) 	struct status_block *s_blk = cp->status_blk.gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) 	dma_addr_t ring_map = udev->l2_ring_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) 	sb_id = cp->status_blk_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) 	cnic_init_context(dev, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) 	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) 	coal_reg = BNX2_HC_COMMAND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) 	coal_val = CNIC_RD(dev, coal_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) 		struct status_block_msix *sblk = cp->status_blk.bnx2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) 		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) 		coal_reg = BNX2_HC_COALESCE_NOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) 		coal_val = 1 << (11 + sb_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) 	i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) 	while (*cp->rx_cons_ptr == 0 && i < 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) 		CNIC_WR(dev, coal_reg, coal_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) 		udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) 		i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) 		barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) 	cp->rx_cons = *cp->rx_cons_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) 	cid_addr = GET_CID_ADDR(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) 	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) 	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) 	if (sb_id == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) 		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) 		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) 	rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) 	for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) 		dma_addr_t buf_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) 		int n = (i % cp->l2_rx_ring_size) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) 		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) 		rxbd->rx_bd_len = cp->l2_single_buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) 		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) 		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) 		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) 	}
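	/* As on the TX side, the final BD chains the RX page back to its
	 * own base address (ring_map + CNIC_PAGE_SIZE).
	 */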
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) 	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) 	rxbd->rx_bd_haddr_hi = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) 	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) 	rxbd->rx_bd_haddr_lo = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) 	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) 	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) 	struct kwqe *wqes[1], l2kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) 	memset(&l2kwqe, 0, sizeof(l2kwqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) 	wqes[0] = &l2kwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) 	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) 			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) 			       KWQE_OPCODE_SHIFT) | 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) 	dev->submit_kwqes(dev, wqes, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) 
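/* Read the iSCSI MAC address for this PCI function from bnx2 shared
 * memory, program it into the EMAC perfect-match registers, and enable
 * an RPM sorting rule for it (together with broadcast, and promiscuous
 * VLAN on pre-5709 chips).
 */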
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) static void cnic_set_bnx2_mac(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) 	val = cp->func << 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) 	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) 	val = cnic_reg_rd_ind(dev, cp->shmem_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) 			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) 	dev->mac_addr[0] = (u8) (val >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) 	dev->mac_addr[1] = (u8) val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) 	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) 	val = cnic_reg_rd_ind(dev, cp->shmem_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) 			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) 	dev->mac_addr[2] = (u8) (val >> 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) 	dev->mac_addr[3] = (u8) (val >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) 	dev->mac_addr[4] = (u8) (val >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) 	dev->mac_addr[5] = (u8) val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) 	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) 	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) 	if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) 		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) 	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) 	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) 	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) 
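/* Bring up the bnx2 kernel queues: size the MQ kernel-bypass block,
 * set the host-coalescing parameters, set up 5709 context memory,
 * initialize the KWQ and KCQ contexts and their page tables, switch
 * the status block pointers over to the MSI-X block when one is in
 * use, unmask the command-scheduler notifications, release the CP and
 * COM doorbells, and finally initialize the L2 rings and request the
 * IRQ.
 */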
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) static int cnic_start_bnx2_hw(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) 	struct cnic_eth_dev *ethdev = cp->ethdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) 	struct status_block *sblk = cp->status_blk.gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) 	u32 val, kcq_cid_addr, kwq_cid_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) 	cnic_set_bnx2_mac(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) 	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) 	if (CNIC_PAGE_BITS > 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) 		val |= (12 - 8)  << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) 		val |= (CNIC_PAGE_BITS - 8)  << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) 	CNIC_WR(dev, BNX2_MQ_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) 	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) 	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) 	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) 	err = cnic_setup_5709_context(dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) 	cnic_init_context(dev, KWQ_CID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) 	cnic_init_context(dev, KCQ_CID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) 	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) 	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) 	cp->max_kwq_idx = MAX_KWQ_IDX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) 	cp->kwq_prod_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) 	cp->kwq_con_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) 	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) 	if (BNX2_CHIP(cp) == BNX2_CHIP_5706 || BNX2_CHIP(cp) == BNX2_CHIP_5708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) 		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) 		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) 	/* Initialize the kernel work queue context. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) 	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) 	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) 	val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) 	val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) 	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) 	val = (u32) cp->kwq_info.pgtbl_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) 	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) 	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) 	cp->kcq1.sw_prod_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) 	cp->kcq1.hw_prod_idx_ptr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) 		&sblk->status_completion_producer_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) 	cp->kcq1.status_idx_ptr = &sblk->status_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) 	/* Initialize the kernel complete queue context. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) 	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) 	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) 	val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) 	val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) 	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) 	val = (u32) cp->kcq1.dma.pgtbl_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) 	cp->int_num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) 		struct status_block_msix *msblk = cp->status_blk.bnx2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) 		u32 sb_id = cp->status_blk_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) 		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) 		cp->kcq1.hw_prod_idx_ptr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) 			&msblk->status_completion_producer_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) 		cp->kcq1.status_idx_ptr = &msblk->status_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) 		cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) 		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) 		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) 		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) 	/* Enable Command Scheduler notification when we write to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) 	 * host producer index of the kernel contexts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) 	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) 	/* Enable Command Scheduler notification when we write to either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) 	 * the Send Queue or Receive Queue producer indexes of the kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) 	 * bypass contexts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) 	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) 	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) 	/* Notify COM when the driver posts an application buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) 	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) 	/* Set the CP and COM doorbells.  These two processors poll the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) 	 * doorbell for a non-zero value before running.  This must be done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) 	 * after setting up the kernel queue contexts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) 	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) 	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) 	cnic_init_bnx2_tx_ring(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) 	cnic_init_bnx2_rx_ring(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) 	err = cnic_init_bnx2_irq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) 		netdev_err(dev->netdev, "cnic_init_irq failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) 		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) 		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) 	ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) 
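/* Write the DMA address of every context block into the chip's
 * context table, rounding each mapping up to ctx_align when an
 * alignment requirement is set.
 */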
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) 	struct cnic_eth_dev *ethdev = cp->ethdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) 	u32 start_offset = ethdev->ctx_tbl_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) 	for (i = 0; i < cp->ctx_blks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) 		struct cnic_ctx *ctx = &cp->ctx_arr[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) 		dma_addr_t map = ctx->mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) 		if (cp->ctx_align) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) 			unsigned long mask = cp->ctx_align - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) 			map = (map + mask) & ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) 		cnic_ctx_tbl_wr(dev, start_offset + i, map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) 
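/* Set up the bottom-half tasklet for bnx2x slow-path processing and,
 * when the ethernet driver runs MSI-X, request the CNIC interrupt
 * vector.
 */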
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) 	struct cnic_eth_dev *ethdev = cp->ethdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) 	tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2x_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) 		err = cnic_request_irq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) 
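/* Set or clear the host-coalescing "enabled" flag for one index of a
 * CSTORM status block; @disable must be 0 or 1, and a value of 1
 * masks the index.
 */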
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) 						u16 sb_id, u8 sb_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) 						u8 disable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) 	u32 addr = BAR_CSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) 			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) 			offsetof(struct hc_status_block_data_e1x, index_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) 			sizeof(struct hc_index_data)*sb_index +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) 			offsetof(struct hc_index_data, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) 	u16 flags = CNIC_RD16(dev, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) 	/* clear the enable flag, then set it from the inverted disable bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) 	flags &= ~HC_INDEX_DATA_HC_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) 	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) 		  HC_INDEX_DATA_HC_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) 	CNIC_WR16(dev, addr, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) 
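/* Enable host coalescing for the iSCSI EQ consumer index: program its
 * timeout and clear its disable flag so that status block interrupts
 * fire again.
 */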
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) 	u8 sb_id = cp->status_blk_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) 	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) 			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) 			offsetof(struct hc_status_block_data_e1x, index_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) 			sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) 			offsetof(struct hc_index_data, timeout), 64 / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) 	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) 
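/* Intentionally empty: there is nothing to disable synchronously on
 * bnx2x.
 */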
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) 
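/* Build the bnx2x L2 TX ring used by the UIO interface.  Each packet
 * slot is a triplet of BDs (start, parse, regular); the final BD of
 * the page is the next-page pointer, which wraps back to the ring
 * itself.  The ring addresses and statistics settings are also copied
 * into the client-init ramrod data.
 */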
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) 				    struct client_init_ramrod_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) 	struct cnic_uio_dev *udev = cp->udev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) 	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) 	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) 	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) 	u32 cli = cp->ethdev->iscsi_l2_client_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) 	memset(txbd, 0, CNIC_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) 	buf_map = udev->l2_buf_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) 	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) 		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) 		struct eth_tx_parse_bd_e1x *pbd_e1x =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) 			&((txbd + 1)->parse_bd_e1x);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) 		struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) 		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) 		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) 		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) 		reg_bd->addr_hi = start_bd->addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) 		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) 		start_bd->nbytes = cpu_to_le16(0x10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) 		start_bd->nbd = cpu_to_le16(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) 		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) 		start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) 		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) 		if (BNX2X_CHIP_IS_E2_PLUS(bp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) 			pbd_e2->parsing_data = (UNICAST_ADDRESS <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) 				ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) 			pbd_e1x->global_data = (UNICAST_ADDRESS <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) 				ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) 	val = (u64) ring_map >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) 	txbd->next_bd.addr_hi = cpu_to_le32(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) 	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) 	val = (u64) ring_map & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) 	txbd->next_bd.addr_lo = cpu_to_le32(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) 	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) 	/* Other ramrod params */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) 	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) 	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) 	/* reset xstorm per-client statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) 	if (cli < MAX_STAT_COUNTER_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) 		data->general.statistics_zero_flg = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) 		data->general.statistics_en_flg = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) 		data->general.statistics_counter_id = cli;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) 	cp->tx_cons_ptr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) 		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) 
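/* Build the bnx2x L2 RX BD ring (second page of the l2_ring area) and
 * the RX completion queue ring (third page), point their next-page
 * entries back at themselves, and fill in the RX portion of the
 * client-init ramrod data.
 */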
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) 				    struct client_init_ramrod_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985) 	struct cnic_uio_dev *udev = cp->udev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) 	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) 				CNIC_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) 	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) 				(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) 	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) 	u32 cli = cp->ethdev->iscsi_l2_client_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) 	int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) 	dma_addr_t ring_map = udev->l2_ring_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) 	/* General data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) 	data->general.client_id = cli;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) 	data->general.activate_flg = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) 	data->general.sp_client_id = cli;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) 	data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) 	data->general.func_id = bp->pfid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) 	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) 		dma_addr_t buf_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) 		int n = (i % cp->l2_rx_ring_size) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) 		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) 		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) 		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) 	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) 	rxbd->addr_hi = cpu_to_le32(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) 	data->rx.bd_page_base.hi = cpu_to_le32(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017) 	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) 	rxbd->addr_lo = cpu_to_le32(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) 	data->rx.bd_page_base.lo = cpu_to_le32(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) 	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) 	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) 	rxcqe->addr_hi = cpu_to_le32(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) 	data->rx.cqe_page_base.hi = cpu_to_le32(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) 	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) 	rxcqe->addr_lo = cpu_to_le32(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) 	data->rx.cqe_page_base.lo = cpu_to_le32(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) 	/* Other ramrod params */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) 	data->rx.client_qzone_id = cl_qzone_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032) 	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) 	data->rx.status_block_id = BNX2X_DEF_SB_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035) 	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) 	data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) 	data->rx.outer_vlan_removal_enable_flg = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) 	data->rx.silent_vlan_removal_flg = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) 	data->rx.silent_vlan_value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) 	data->rx.silent_vlan_mask = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) 	cp->rx_cons_ptr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) 		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) 	cp->rx_cons = *cp->rx_cons_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) 
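/* Wire up the kernel completion queues: KCQ1 follows the iSCSI event
 * queue through the CSTORM producer offset, and on E2 and later chips
 * KCQ2 additionally follows the FCoE event queue through USTORM.
 */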
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) 	u32 pfid = bp->pfid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) 	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) 			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) 	cp->kcq1.sw_prod_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) 	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) 		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) 		cp->kcq1.hw_prod_idx_ptr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) 			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) 		cp->kcq1.status_idx_ptr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) 			&sb->sb.running_index[SM_RX_ID];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) 		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) 		cp->kcq1.hw_prod_idx_ptr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) 			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070) 		cp->kcq1.status_idx_ptr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) 			&sb->sb.running_index[SM_RX_ID];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) 	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) 		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) 		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078) 					USTORM_FCOE_EQ_PROD_OFFSET(pfid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) 		cp->kcq2.sw_prod_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) 		cp->kcq2.hw_prod_idx_ptr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081) 			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) 		cp->kcq2.status_idx_ptr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083) 			&sb->sb.running_index[SM_RX_ID];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) 
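/* Bring up the bnx2x side: allocate the iSCSI (and, on E2+, FCoE) CID
 * tables, wire up the KCQs, program the single iSCSI event queue in
 * CSTORM (producer, next-page addresses, status block binding),
 * publish the global buffer address and the TCP advertised window,
 * write the context table, and request the IRQ.
 */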
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087) static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) 	struct cnic_eth_dev *ethdev = cp->ethdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093) 	u32 pfid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) 	dev->stats_addr = ethdev->addr_drv_info_to_mcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096) 	cp->func = bp->pf_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098) 	pfid = bp->pfid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) 	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) 			       cp->iscsi_start_cid, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) 	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107) 		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) 					cp->fcoe_start_cid, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) 	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) 	cnic_init_bnx2x_kcq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118) 	/* Only 1 EQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) 	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120) 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) 		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) 		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) 		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) 		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) 		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) 		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) 		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) 		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) 		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) 	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) 		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) 		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) 	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) 		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) 		HC_INDEX_ISCSI_EQ_CONS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) 	CNIC_WR(dev, BAR_USTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143) 		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) 		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) 	CNIC_WR(dev, BAR_USTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) 		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) 		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) 	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) 		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152) 	cnic_setup_bnx2x_context(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) 	ret = cnic_init_bnx2x_irq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) 	ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) 
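/* Initialize the L2 rings once per bring-up.  bnx2 rings are set up
 * directly; for bnx2x a CLIENT_SETUP ramrod is built in the UIO buffer
 * and submitted, the code polls up to roughly 10 ms for its
 * completion, and the doorbell parameters are then published for user
 * space.
 */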
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) static void cnic_init_rings(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166) 	struct cnic_uio_dev *udev = cp->udev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) 	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) 	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) 		cnic_init_bnx2_tx_ring(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173) 		cnic_init_bnx2_rx_ring(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) 		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) 	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) 		u32 cli = cp->ethdev->iscsi_l2_client_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) 		u32 cid = cp->ethdev->iscsi_l2_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) 		u32 cl_qzone_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179) 		struct client_init_ramrod_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) 		union l5cm_specific_data l5_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) 		struct ustorm_eth_rx_producers rx_prods = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182) 		u32 off, i, *cid_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184) 		rx_prods.bd_prod = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) 		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) 		barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188) 		cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) 		off = BAR_USTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191) 			(BNX2X_CHIP_IS_E2_PLUS(bp) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) 			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) 			 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) 		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) 			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) 		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) 		data = udev->l2_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) 		cid_ptr = udev->l2_buf + 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) 		memset(data, 0, sizeof(*data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) 		cnic_init_bnx2x_tx_ring(dev, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206) 		cnic_init_bnx2x_rx_ring(dev, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208) 		data->general.fp_hsi_ver = ETH_FP_HSI_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210) 		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) 		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) 		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) 		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216) 			cid, ETH_CONNECTION_TYPE, &l5_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) 		i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219) 		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) 		       ++i < 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) 			msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) 		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) 			netdev_err(dev->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) 				"iSCSI CLIENT_SETUP did not complete\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226) 		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) 		cnic_ring_ctl(dev, cid, cli, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228) 		*cid_ptr = cid >> 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229) 		*(cid_ptr + 1) = cid * bp->db_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) 		*(cid_ptr + 2) = UIO_USE_TX_DOORBELL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233) 
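/* Tear down the L2 rings.  For bnx2x this stops the ring, submits an
 * ETH_HALT ramrod and polls up to roughly 10 ms for its completion,
 * then issues a CFC_DEL to release the connection before the RX ring
 * page is cleared.
 */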
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) static void cnic_shutdown_rings(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237) 	struct cnic_uio_dev *udev = cp->udev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) 	void *rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) 	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243) 	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244) 		cnic_shutdown_bnx2_rx_ring(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245) 	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) 		u32 cli = cp->ethdev->iscsi_l2_client_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) 		u32 cid = cp->ethdev->iscsi_l2_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) 		union l5cm_specific_data l5_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251) 		cnic_ring_ctl(dev, cid, cli, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253) 		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) 		l5_data.phy_address.lo = cli;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) 		l5_data.phy_address.hi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257) 		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258) 			cid, ETH_CONNECTION_TYPE, &l5_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259) 		i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260) 		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) 		       ++i < 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262) 			msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264) 		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265) 			netdev_err(dev->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266) 				"iSCSI CLIENT_HALT did not complete\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267) 		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269) 		memset(&l5_data, 0, sizeof(l5_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) 		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271) 			cid, NONE_CONNECTION_TYPE, &l5_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272) 		msleep(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274) 	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) 	rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276) 	memset(rx_ring, 0, CNIC_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278) 
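/* Register this CNIC instance with the underlying ethernet driver and
 * re-read the iSCSI connection limit, which may change once the NIC
 * firmware has been downloaded.
 */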
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279) static int cnic_register_netdev(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) 	struct cnic_eth_dev *ethdev = cp->ethdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285) 	if (!ethdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288) 	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291) 	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) 		netdev_err(dev->netdev, "register_cnic failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) 	/* Read the iSCSI config again.  On some bnx2x devices, the iSCSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) 	 * config can change after the firmware is downloaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298) 	dev->max_iscsi_conn = ethdev->max_iscsi_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) 	if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300) 		dev->max_iscsi_conn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) static void cnic_unregister_netdev(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308) 	struct cnic_eth_dev *ethdev = cp->ethdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310) 	if (!ethdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5313) 	ethdev->drv_unregister_cnic(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) 
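/* Common bring-up path: take a reference on the PCI device, latch the
 * register view and status block from the ethernet driver, allocate
 * resources, start the chip-specific hardware, open the connection
 * manager, and enable interrupts.  On failure, everything acquired so
 * far is released.
 */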
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316) static int cnic_start_hw(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) 	struct cnic_eth_dev *ethdev = cp->ethdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) 	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) 		return -EALREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325) 	dev->regview = ethdev->io_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326) 	pci_dev_get(dev->pcidev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327) 	cp->func = PCI_FUNC(dev->pcidev->devfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328) 	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) 	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) 	err = cp->alloc_resc(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) 		netdev_err(dev->netdev, "resource allocation failure\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334) 		goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337) 	err = cp->start_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339) 		goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341) 	err = cnic_cm_open(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343) 		goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345) 	set_bit(CNIC_F_CNIC_UP, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) 	cp->enable_int(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) err1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) 	if (ethdev->drv_state & CNIC_DRV_STATE_HANDLES_IRQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353) 		cp->stop_hw(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) 		cp->free_resc(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) 	pci_dev_put(dev->pcidev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) 
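/* Undo cnic_start_bnx2_hw(): disable the interrupt, park the CP and
 * COM processors, reset the kernel queue contexts, release the 5709
 * context memory, and free the IRQ and resources.
 */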
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360) static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) 	cnic_disable_bnx2_int_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364) 	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365) 	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367) 	cnic_init_context(dev, KWQ_CID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) 	cnic_init_context(dev, KCQ_CID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) 	cnic_setup_5709_context(dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) 	cnic_free_irq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) 	cnic_free_resc(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376) 
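/* Undo cnic_start_bnx2x_hw(): free the IRQ, zero the iSCSI EQ sync
 * line and status block index, clear the producer state on both the
 * host and chip sides, and free the resources.
 */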
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377) static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379) 	struct cnic_local *cp = dev->cnic_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380) 	struct bnx2x *bp = netdev_priv(dev->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381) 	u32 hc_index = HC_INDEX_ISCSI_EQ_CONS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382) 	u32 sb_id = cp->status_blk_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383) 	u32 idx_off, syn_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385) 	cnic_free_irq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387) 	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388) 		idx_off = offsetof(struct hc_status_block_e2, index_values) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389) 			  (hc_index * sizeof(u16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391) 		syn_off = CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_index, sb_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393) 		idx_off = offsetof(struct hc_status_block_e1x, index_values) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394) 			  (hc_index * sizeof(u16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396) 		syn_off = CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_index, sb_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + syn_off, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399) 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(sb_id) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400) 		  idx_off, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402) 	*cp->kcq1.hw_prod_idx_ptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403) 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404) 		CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405) 	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406) 	cnic_free_resc(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408) 
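/* Common tear-down path.  Wait up to roughly 1.5 seconds for the UIO
 * device to be closed so that the ring shutdown can complete, then
 * shut down the rings, stop the connection manager, and stop the
 * chip-specific hardware.
 */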
static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		int i = 0;

		/* Need to wait for the ring shutdown event to complete
		 * before clearing the CNIC_UP flag.
		 */
		while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		cnic_shutdown_rings(dev);
		cp->stop_cm(dev);
		cp->ethdev->drv_state &= ~CNIC_DRV_STATE_HANDLES_IRQ;
		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}

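/* Wait up to ~1s for the last reference to drop, then free the
 * cnic_dev and drop the netdev reference taken when it was created.
 */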
static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");

	netdev_info(dev->netdev, "Removed CNIC device\n");
	dev_put(dev->netdev);
	kfree(dev);
}

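/* Retrieve the FC NPIV table via the bnx2x driver; only valid while
 * the device is up and only on E2+ chips.
 */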
static int cnic_get_fc_npiv_tbl(struct cnic_dev *dev,
				struct cnic_fc_npiv_tbl *npiv_tbl)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int ret;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;     /* bnx2x is down */

	if (!BNX2X_CHIP_IS_E2_PLUS(bp))
		return -EINVAL;

	ret = cp->ethdev->drv_get_fc_npiv_tbl(dev->netdev, npiv_tbl);
	return ret;
}

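/* Allocate a cnic_dev with its cnic_local private data in a single
 * allocation and set up the chip-independent entry points.
 */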
static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL)
		return NULL;

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
	cdev->get_fc_npiv_tbl = cnic_get_fc_npiv_tbl;
	atomic_set(&cdev->ref_count, 0);

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	netdev_info(dev, "Added CNIC device\n");

	return cdev;
}

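/* Probe a bnx2 netdev for CNIC support; pre-0x10 revisions of the
 * 5709/5709S are explicitly skipped.
 */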
static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *ethdev = NULL;

	if (bp->cnic_probe)
		ethdev = (bp->cnic_probe)(dev);

	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	     pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
	    (pdev->revision < 0x10)) {
		pci_dev_put(pdev);
		goto cnic_err;
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cdev->max_iscsi_conn = ethdev->max_iscsi_conn;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}

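/* Probe a bnx2x netdev for CNIC support and install the bnx2x-class
 * callbacks, selecting the E2+ or E1x MSI-X ack/arm handlers to match
 * the chip.
 */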
static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *ethdev = NULL;

	if (bp->cnic_probe)
		ethdev = bp->cnic_probe(dev);

	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cdev->stats_addr = ethdev->addr_drv_info_to_mcp;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
	if (CNIC_SUPPORTS_FCOE(bp)) {
		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
		cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
	}

	if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
		cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;

	memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		cp->ack_int = cnic_ack_bnx2x_e2_msix;
		cp->arm_int = cnic_arm_bnx2x_e2_msix;
	} else {
		cp->ack_int = cnic_ack_bnx2x_msix;
		cp->arm_int = cnic_arm_bnx2x_msix;
	}
	cp->close_conn = cnic_close_bnx2x_conn;
	return cdev;
}

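/* Recognize CNIC-capable netdevs by their ethtool driver name; on a
 * match, create the cnic_dev and add it to the global device list.
 */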
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}

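/* Deliver a netdev event to every ULP that implements
 * ->indicate_netevent(), marking ULP_F_CALL_PENDING for the duration
 * of each callback.
 */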
static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
			      u16 vlan_id)
{
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;
		void *ctx;

		mutex_lock(&cnic_lock);
		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
						lockdep_is_held(&cnic_lock));
		if (!ulp_ops || !ulp_ops->indicate_netevent) {
			mutex_unlock(&cnic_lock);
			continue;
		}

		ctx = cp->ulp_handle[if_type];

		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		ulp_ops->indicate_netevent(ctx, event, vlan_id);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}

/* netdev event handler: picks up hot-plugged CNIC devices on
 * NETDEV_REGISTER, starts/stops the hardware on NETDEV_UP and
 * NETDEV_GOING_DOWN, frees the device on NETDEV_UNREGISTER, and
 * relays events (including those on VLAN devices) to the ULPs.
 */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
							 void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct cnic_dev *dev;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && event == NETDEV_REGISTER) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		cnic_rcv_netevent(cp, event, 0);

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	} else {
		struct net_device *realdev;
		u16 vid;

		vid = cnic_get_vlan(netdev, &realdev);
		if (realdev) {
			dev = cnic_from_netdev(realdev);
			if (dev) {
				vid |= VLAN_CFI_MASK;	/* make non-zero */
				cnic_rcv_netevent(dev->cnic_priv, event, vid);
				cnic_put(dev);
			}
		}
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};

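/* Free any UIO devices still queued on cnic_udev_list. */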
static void cnic_release(void)
{
	struct cnic_uio_dev *udev;

	while (!list_empty(&cnic_udev_list)) {
		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
				  list);
		cnic_free_uio(udev);
	}
}

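/* Module init: register the netdev notifier, then create the
 * single-threaded workqueue used for deferred CNIC work.
 */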
static int __init cnic_init(void)
{
	int rc = 0;

	pr_info("%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	cnic_wq = create_singlethread_workqueue("cnic_wq");
	if (!cnic_wq) {
		cnic_release();
		unregister_netdevice_notifier(&cnic_netdev_notifier);
		return -ENOMEM;
	}

	return 0;
}

static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
	destroy_workqueue(cnic_wq);
}

module_init(cnic_init);
module_exit(cnic_exit);