Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2)  * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * This program is free software; you may redistribute it and/or modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * it under the terms of the GNU General Public License as published by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * the Free Software Foundation; version 2 of the License.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  * SOFTWARE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/if_ether.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/if_vlan.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/workqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <scsi/fc/fc_fip.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <scsi/fc/fc_els.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <scsi/fc/fc_fcoe.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <scsi/fc_frame.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <scsi/libfc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include "fnic_io.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include "fnic.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include "fnic_fip.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #include "cq_enet_desc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) #include "cq_exch_desc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) 
/* Destination MAC for FIP VLAN discovery requests: the ALL-FCF-MACs group. */
static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
/*
 * Driver-global workqueues for deferred FIP / event processing.
 * NOTE(review): the work items queued on these are defined elsewhere in the
 * driver; confirm producers/consumers against the rest of the file.
 */
struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;

/* Forward declarations for static helpers defined later in this file. */
static void fnic_set_eth_mode(struct fnic *);
static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) void fnic_handle_link(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) 	struct fnic *fnic = container_of(work, struct fnic, link_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) 	int old_link_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 	u32 old_link_down_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 	u64 old_port_speed, new_port_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 	spin_lock_irqsave(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) 	if (fnic->stop_rx_link_events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 	old_link_down_cnt = fnic->link_down_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 	old_link_status = fnic->link_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 	old_port_speed = atomic64_read(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 			&fnic->fnic_stats.misc_stats.current_port_speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 	fnic->link_status = vnic_dev_link_status(fnic->vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 	new_port_speed = vnic_dev_port_speed(fnic->vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 	atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 			new_port_speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 	if (old_port_speed != new_port_speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 		shost_printk(KERN_INFO, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 				"Current vnic speed set to :  %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 				new_port_speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 	switch (vnic_dev_port_speed(fnic->vdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 	case DCEM_PORTSPEED_10G:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_10GBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 		fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 	case DCEM_PORTSPEED_20G:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_20GBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 		fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 	case DCEM_PORTSPEED_25G:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_25GBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 		fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 	case DCEM_PORTSPEED_40G:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 	case DCEM_PORTSPEED_4x10G:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_40GBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 		fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 	case DCEM_PORTSPEED_100G:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_100GBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 		fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 		fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 	if (old_link_status == fnic->link_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 		if (!fnic->link_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 			/* DOWN -> DOWN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 			fnic_fc_trace_set_data(fnic->lport->host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 				FNIC_FC_LE, "Link Status: DOWN->DOWN",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 				strlen("Link Status: DOWN->DOWN"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 			if (old_link_down_cnt != fnic->link_down_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 				/* UP -> DOWN -> UP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 				fnic->lport->host_stats.link_failure_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 				fnic_fc_trace_set_data(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 					fnic->lport->host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 					FNIC_FC_LE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 					"Link Status:UP_DOWN_UP",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 					strlen("Link_Status:UP_DOWN_UP")
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 					);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 					     "link down\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 				fcoe_ctlr_link_down(&fnic->ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 				if (fnic->config.flags & VFCF_FIP_CAPABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 					/* start FCoE VLAN discovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 					fnic_fc_trace_set_data(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 						fnic->lport->host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 						FNIC_FC_LE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 						"Link Status: UP_DOWN_UP_VLAN",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 						strlen(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 						"Link Status: UP_DOWN_UP_VLAN")
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 						);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 					fnic_fcoe_send_vlan_req(fnic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 					return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 					     "link up\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 				fcoe_ctlr_link_up(&fnic->ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 				/* UP -> UP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 				fnic_fc_trace_set_data(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 					fnic->lport->host->host_no, FNIC_FC_LE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 					"Link Status: UP_UP",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 					strlen("Link Status: UP_UP"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 	} else if (fnic->link_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 		/* DOWN -> UP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 			/* start FCoE VLAN discovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 				fnic_fc_trace_set_data(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 				fnic->lport->host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 				FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 				strlen("Link Status: DOWN_UP_VLAN"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 			fnic_fcoe_send_vlan_req(fnic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 		fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 			"Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 		fcoe_ctlr_link_up(&fnic->ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 		/* UP -> DOWN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 		fnic->lport->host_stats.link_failure_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 		fnic_fc_trace_set_data(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 			fnic->lport->host->host_no, FNIC_FC_LE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 			"Link Status: UP_DOWN",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 			strlen("Link Status: UP_DOWN"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 				"deleting fip-timer during link-down\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 			del_timer_sync(&fnic->fip_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 		fcoe_ctlr_link_down(&fnic->ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189)  * This function passes incoming fabric frames to libFC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) void fnic_handle_frame(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 	struct fnic *fnic = container_of(work, struct fnic, frame_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 	struct fc_lport *lp = fnic->lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 	struct fc_frame *fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 	while ((skb = skb_dequeue(&fnic->frame_queue))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 		spin_lock_irqsave(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 		if (fnic->stop_rx_link_events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 			dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 		fp = (struct fc_frame *)skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 		 * If we're in a transitional state, just re-queue and return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 		 * The queue will be serviced when we get to a stable state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 		if (fnic->state != FNIC_IN_FC_MODE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 		    fnic->state != FNIC_IN_ETH_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 			skb_queue_head(&fnic->frame_queue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 		fc_exch_recv(lp, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) void fnic_fcoe_evlist_free(struct fnic *fnic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 	struct fnic_event *fevt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 	struct fnic_event *next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 	spin_lock_irqsave(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 	if (list_empty(&fnic->evlist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 		list_del(&fevt->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 		kfree(fevt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 
/**
 * fnic_handle_event() - work handler that drains and dispatches fnic events.
 * @work: the event_work member embedded in the owning struct fnic.
 *
 * Walks fnic->evlist under fnic_lock, dispatching each event.  Bails out
 * early (leaving remaining events queued) when the fnic is in a
 * transitional state, or (freeing only the current event) when teardown
 * has started.
 */
void fnic_handle_event(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, event_work);
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		/* Teardown in progress: free this event and stop. */
		if (fnic->stop_rx_link_events) {
			list_del(&fevt->list);
			kfree(fevt);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}

		list_del(&fevt->list);
		switch (fevt->event) {
		case FNIC_EVT_START_VLAN_DISC:
			/*
			 * The VLAN request is sent unlocked; fnic_lock is
			 * dropped and retaken around the call.
			 * NOTE(review): evlist may be modified while the
			 * lock is dropped; iteration relies on the 'next'
			 * pointer captured beforehand -- confirm no
			 * concurrent path can free that entry.
			 */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fcoe_send_vlan_req(fnic);
			spin_lock_irqsave(&fnic->fnic_lock, flags);
			break;
		case FNIC_EVT_START_FCF_DISC:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				  "Start FCF Discovery\n");
			fnic_fcoe_start_fcf_disc(fnic);
			break;
		default:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				  "Unknown event 0x%x\n", fevt->event);
			break;
		}
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297)  * Check if the Received FIP FLOGI frame is rejected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298)  * @fip: The FCoE controller that received the frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299)  * @skb: The received FIP frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301)  * Returns non-zero if the frame is rejected with unsupported cmd with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302)  * insufficient resource els explanation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 					 struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 	struct fc_lport *lport = fip->lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 	struct fip_header *fiph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 	struct fc_frame_header *fh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 	struct fip_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 	struct fip_encaps *els;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 	u16 op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 	u8 els_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 	u8 sub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 	size_t rlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 	size_t dlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 	if (skb_linearize(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 	if (skb->len < sizeof(*fiph))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 	fiph = (struct fip_header *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 	op = ntohs(fiph->fip_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 	sub = fiph->fip_subcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 	if (op != FIP_OP_LS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 	if (sub != FIP_SC_REP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 	rlen = ntohs(fiph->fip_dl_len) * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 	if (rlen + sizeof(*fiph) > skb->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 	desc = (struct fip_desc *)(fiph + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 	dlen = desc->fip_dlen * FIP_BPW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 	if (desc->fip_dtype == FIP_DT_FLOGI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 		if (dlen < sizeof(*els) + sizeof(*fh) + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 		els = (struct fip_encaps *)desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 		fh = (struct fc_frame_header *)(els + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 		if (!fh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 		 * ELS command code, reason and explanation should be = Reject,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 		 * unsupported command and insufficient resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 		els_op = *(u8 *)(fh + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 		if (els_op == ELS_LS_RJT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 			shost_printk(KERN_INFO, lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 				  "Flogi Request Rejected by Switch\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 		shost_printk(KERN_INFO, lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 				"Flogi Request Accepted by Switch\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 	struct fcoe_ctlr *fip = &fnic->ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 	char *eth_fr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 	struct fip_vlan *vlan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 	u64 vlan_tov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	fnic_fcoe_reset_vlans(fnic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 	fnic->set_vlan(fnic, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 	if (printk_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 			  "Sending VLAN request...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 	skb = dev_alloc_skb(sizeof(struct fip_vlan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 	if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	eth_fr = (char *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 	vlan = (struct fip_vlan *)eth_fr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 	memset(vlan, 0, sizeof(*vlan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 	memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 	memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 	vlan->eth.h_proto = htons(ETH_P_FIP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 	vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 	vlan->fip.fip_op = htons(FIP_OP_VLAN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 	vlan->fip.fip_subcode = FIP_SC_VL_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 	vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 	vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 	vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 	vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 	vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	skb_put(skb, sizeof(*vlan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 	skb->protocol = htons(ETH_P_FIP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 	skb_reset_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	skb_reset_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	fip->send(fip, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 	/* set a timer so that we can retry if there no response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 	vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 	mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 	struct fcoe_ctlr *fip = &fnic->ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	struct fip_header *fiph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 	struct fip_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	u16 vid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	size_t rlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 	size_t dlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	struct fcoe_vlan *vlan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 	u64 sol_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 		  "Received VLAN response...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	fiph = (struct fip_header *) skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 		  "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 		  ntohs(fiph->fip_op), fiph->fip_subcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 	rlen = ntohs(fiph->fip_dl_len) * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	fnic_fcoe_reset_vlans(fnic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 	spin_lock_irqsave(&fnic->vlans_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	desc = (struct fip_desc *)(fiph + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	while (rlen > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 		dlen = desc->fip_dlen * FIP_BPW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 		switch (desc->fip_dtype) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 		case FIP_DT_VLAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 			vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 			shost_printk(KERN_INFO, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 				  "process_vlan_resp: FIP VLAN %d\n", vid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 			vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 			if (!vlan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 				/* retry from timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 				spin_unlock_irqrestore(&fnic->vlans_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 							flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 			vlan->vid = vid & 0x0fff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 			vlan->state = FIP_VLAN_AVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 			list_add_tail(&vlan->list, &fnic->vlans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 		desc = (struct fip_desc *)((char *)desc + dlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 		rlen -= dlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	/* any VLAN descriptors present ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	if (list_empty(&fnic->vlans)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 		/* retry from timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 		atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 			  "No VLAN descriptors in FIP VLAN response\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	fnic->set_vlan(fnic, vlan->vid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	vlan->state = FIP_VLAN_SENT; /* sent now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	vlan->sol_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	/* start the solicitation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	fcoe_ctlr_link_up(fip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	struct fcoe_vlan *vlan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	u64 sol_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	spin_lock_irqsave(&fnic->vlans_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	fnic->set_vlan(fnic, vlan->vid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	vlan->state = FIP_VLAN_SENT; /* sent now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	vlan->sol_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	/* start the solicitation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	fcoe_ctlr_link_up(&fnic->ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	struct fcoe_vlan *fvlan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	spin_lock_irqsave(&fnic->vlans_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	if (list_empty(&fnic->vlans)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	if (fvlan->state == FIP_VLAN_USED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	if (fvlan->state == FIP_VLAN_SENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 		fvlan->state = FIP_VLAN_USED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	struct fnic_event *fevt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	if (!fevt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	fevt->fnic = fnic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	fevt->event = ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	spin_lock_irqsave(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	list_add_tail(&fevt->list, &fnic->evlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	schedule_work(&fnic->event_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	struct fip_header *fiph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	int ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	u16 op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	u8 sub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	if (!skb || !(skb->data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	if (skb_linearize(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	fiph = (struct fip_header *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	op = ntohs(fiph->fip_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	sub = fiph->fip_subcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 		if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 			goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 		/* pass it on to fcoe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 		ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	} else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 		/* set the vlan as used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 		fnic_fcoe_process_vlan_resp(fnic, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	} else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 		/* received CVL request, restart vlan disc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 		/* pass it on to fcoe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 		ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) void fnic_handle_fip_frame(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	struct ethhdr *eh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 		spin_lock_irqsave(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 		if (fnic->stop_rx_link_events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 			dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		 * If we're in a transitional state, just re-queue and return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 		 * The queue will be serviced when we get to a stable state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 		if (fnic->state != FNIC_IN_FC_MODE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 		    fnic->state != FNIC_IN_ETH_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 			skb_queue_head(&fnic->fip_frame_queue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 		eh = (struct ethhdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 		if (eh->h_proto == htons(ETH_P_FIP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 			skb_pull(skb, sizeof(*eh));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 			if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 				dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 			 * If there's FLOGI rejects - clear all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 			 * fcf's & restart from scratch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 			if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 				atomic64_inc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 					&fnic_stats->vlan_stats.flogi_rejects);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 				shost_printk(KERN_INFO, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 					  "Trigger a Link down - VLAN Disc\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 				fcoe_ctlr_link_down(&fnic->ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 				/* start FCoE VLAN discovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 				fnic_fcoe_send_vlan_req(fnic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 				dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 			fcoe_ctlr_recv(&fnic->ctlr, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658)  * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659)  * @fnic:	fnic instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660)  * @skb:	Ethernet Frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	struct fc_frame *fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	struct ethhdr *eh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	struct fcoe_hdr *fcoe_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	struct fcoe_crc_eof *ft;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	 * Undo VLAN encapsulation if present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	eh = (struct ethhdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	if (eh->h_proto == htons(ETH_P_8021Q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 		eh = skb_pull(skb, VLAN_HLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 		skb_reset_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	if (eh->h_proto == htons(ETH_P_FIP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 		if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 			printk(KERN_ERR "Dropped FIP frame, as firmware "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 					"uses non-FIP mode, Enable FIP "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 					"using UCSM\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 			goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 			FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 			printk(KERN_ERR "fnic ctlr frame trace error!!!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 		skb_queue_tail(&fnic->fip_frame_queue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 		queue_work(fnic_fip_queue, &fnic->fip_frame_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 		return 1;		/* let caller know packet was used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	if (eh->h_proto != htons(ETH_P_FCOE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	skb_set_network_header(skb, sizeof(*eh));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	skb_pull(skb, sizeof(*eh));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	fcoe_hdr = (struct fcoe_hdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	fp = (struct fc_frame *)skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	fc_frame_init(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	fr_sof(fp) = fcoe_hdr->fcoe_sof;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	skb_pull(skb, sizeof(struct fcoe_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	skb_reset_transport_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	fr_eof(fp) = ft->fcoe_eof;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	skb_trim(skb, skb->len - sizeof(*ft));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	dev_kfree_skb_irq(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718)  * fnic_update_mac_locked() - set data MAC address and filters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719)  * @fnic:	fnic instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720)  * @new:	newly-assigned FCoE MAC address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722)  * Called with the fnic lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	u8 *ctl = fnic->ctlr.ctl_src_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	u8 *data = fnic->data_src_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	if (is_zero_ether_addr(new))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 		new = ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	if (ether_addr_equal(data, new))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		vnic_dev_del_addr(fnic->vdev, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	memcpy(data, new, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	if (!ether_addr_equal(new, ctl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		vnic_dev_add_addr(fnic->vdev, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742)  * fnic_update_mac() - set data MAC address and filters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743)  * @lport:	local port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744)  * @new:	newly-assigned FCoE MAC address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) void fnic_update_mac(struct fc_lport *lport, u8 *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	struct fnic *fnic = lport_priv(lport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	spin_lock_irq(&fnic->fnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	fnic_update_mac_locked(fnic, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	spin_unlock_irq(&fnic->fnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756)  * fnic_set_port_id() - set the port_ID after successful FLOGI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757)  * @lport:	local port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758)  * @port_id:	assigned FC_ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759)  * @fp:		received frame containing the FLOGI accept or NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761)  * This is called from libfc when a new FC_ID has been assigned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762)  * This causes us to reset the firmware to FC_MODE and setup the new MAC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763)  * address and FC_ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765)  * It is also called with FC_ID 0 when we're logged off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767)  * If the FC_ID is due to point-to-point, fp may be NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	struct fnic *fnic = lport_priv(lport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	u8 *mac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		     port_id, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	 * If we're clearing the FC_ID, change to use the ctl_src_addr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	 * Set ethernet mode to send FLOGI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	if (!port_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		fnic_set_eth_mode(fnic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	if (fp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		mac = fr_cb(fp)->granted_mac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		if (is_zero_ether_addr(mac)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 			/* non-FIP - FLOGI already accepted - ignore return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 			fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		fnic_update_mac(lport, mac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	/* Change state to reflect transition to FC mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	spin_lock_irq(&fnic->fnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 			     "Unexpected fnic state %s while"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 			     " processing flogi resp\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 			     fnic_state_to_str(fnic->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		spin_unlock_irq(&fnic->fnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	spin_unlock_irq(&fnic->fnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	 * Send FLOGI registration to firmware to set up FC mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	 * The new address will be set up when registration completes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	ret = fnic_flogi_reg_handler(fnic, port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		spin_lock_irq(&fnic->fnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 			fnic->state = FNIC_IN_ETH_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		spin_unlock_irq(&fnic->fnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
/*
 * fnic_rq_cmpl_frame_recv() - per-buffer RQ completion callback.
 *
 * Called from vnic_rq_service() for each completed receive descriptor.
 * Decodes the completion descriptor (FCP or raw-Ethernet flavor), trims
 * the skb to the actual received length, drops frames with errors, and
 * queues good FC frames on fnic->frame_queue for the frame_work handler.
 *
 * Runs in interrupt context (uses dev_kfree_skb_irq on the drop path).
 */
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				    *cq_desc, struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	/* The buffer was mapped for receive in fnic_alloc_rq_frame();
	 * unmap before the CPU touches the data. */
	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_FROM_DEVICE);
	skb = buf->os_buf;
	/* libfc lays the fc_frame header over the skb itself. */
	fp = (struct fc_frame *)skb;
	buf->os_buf = NULL;

	/* First decode only the common prefix to learn the descriptor type. */
	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		/* FCP descriptor: firmware already validated/stripped FCoE
		 * framing; SOF/EOF are delivered out of band. */
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
				   &type, &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
				   &tmpl, &fcp_bytes_written, &sof, &eof,
				   &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		skb_trim(skb, fcp_bytes_written);
		fr_sof(fp) = sof;
		fr_eof(fp) = eof;

	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		/* Raw Ethernet descriptor: may carry FIP or FCoE; the
		 * import helper below parses headers and sets sof/eof. */
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
				    &type, &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fc_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
		skb_trim(skb, bytes_written);
		if (!fcs_ok) {
			atomic64_inc(&fnic_stats->misc_stats.frame_errors);
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "fcs error.  dropping packet.\n");
			goto drop;
		}
		/* Nonzero means the packet was consumed (e.g. handed to the
		 * FIP controller) or freed by the import routine. */
		if (fnic_import_rq_eth_pkt(fnic, skb))
			return;

	} else {
		/* wrong CQ type*/
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	/* Drop anything the hardware flagged as damaged. */
	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		atomic64_inc(&fnic_stats->misc_stats.frame_errors);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "fnic rq_cmpl fcoe x%x fcsok x%x"
			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
			     " x%x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fc_crc_ok, fcoe_enc_error);
		goto drop;
	}

	/* fnic_lock protects stop_rx_link_events and the lport binding. */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	/* Best-effort frame trace; failure only logs, never drops. */
	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
					(char *)skb->data, skb->len)) != 0) {
		printk(KERN_ERR "fnic ctlr frame trace error!!!");
	}

	/* Defer protocol processing to the frame_work workqueue. */
	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 				     struct cq_desc *cq_desc, u8 type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 				     u16 q_number, u16 completed_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 				     void *opaque)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	struct fnic *fnic = vnic_dev_priv(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 			VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 			NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	unsigned int tot_rq_work_done = 0, cur_work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	for (i = 0; i < fnic->rq_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 						fnic_rq_cmpl_handler_cont,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 						NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		if (cur_work_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 			err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 				shost_printk(KERN_ERR, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 					     "fnic_alloc_rq_frame can't alloc"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 					     " frame\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		tot_rq_work_done += cur_work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	return tot_rq_work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964)  * This function is called once at init time to allocate and fill RQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965)  * buffers. Subsequently, it is called in the interrupt context after RQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966)  * buffer processing to replenish the buffers in the RQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) int fnic_alloc_rq_frame(struct vnic_rq *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	struct fnic *fnic = vnic_dev_priv(rq->vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	u16 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	dma_addr_t pa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	skb = dev_alloc_skb(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 			     "Unable to allocate RQ sk_buff\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	skb_reset_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	skb_reset_transport_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	skb_reset_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	skb_put(skb, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		printk(KERN_ERR "PCI mapping failed with error %d\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		goto free_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	fnic_queue_rq_desc(rq, skb, pa, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) free_skb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	struct fc_frame *fp = buf->os_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	struct fnic *fnic = vnic_dev_priv(rq->vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 			 DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	dev_kfree_skb(fp_skb(fp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	buf->os_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)  * fnic_eth_send() - Send Ethernet frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)  * @fip:	fcoe_ctlr instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)  * @skb:	Ethernet Frame, FIP, without VLAN encapsulation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)  */
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	struct fnic *fnic = fnic_from_ctlr(fip);
	struct vnic_wq *wq = &fnic->wq[0];
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	unsigned long flags;

	if (!fnic->vlan_hw_insert) {
		/* Software VLAN tagging: grow the header by the difference
		 * between a VLAN Ethernet header and a plain one (the 4-byte
		 * 802.1Q tag), then rebuild the header in place. */
		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
		vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
		/* Copy dest + source MAC addresses (2 * ETH_ALEN bytes). */
		memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		/* Trace the frame as it will appear on the wire; 0x80
		 * presumably marks this as a control (FIP) frame — confirm
		 * against fnic_fc_trace_set_data(). */
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
	} else {
		/* Hardware inserts the VLAN tag; trace the untagged frame. */
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
	}

	pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
			DMA_TO_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
		printk(KERN_ERR "DMA mapping failed\n");
		goto free_skb;
	}

	/* wq_lock[0] serializes posting against the WQ completion path. */
	spin_lock_irqsave(&fnic->wq_lock[0], flags);
	if (!vnic_wq_desc_avail(wq))
		goto irq_restore;

	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
			       0 /* hw inserts cos value */,
			       fnic->vlan_id, 1);
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
	return;

irq_restore:
	/* No descriptor available: undo the mapping and drop the frame. */
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
	dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
free_skb:
	kfree_skb(skb);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)  * Send FC frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)  */
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
	struct vnic_wq *wq = &fnic->wq[0];
	struct sk_buff *skb;
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	struct fcoe_hdr *fcoe_hdr;
	struct fc_frame_header *fh;
	u32 tot_len, eth_hdr_len;
	int ret = 0;
	unsigned long flags;

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);

	/* Give the FIP controller first refusal on ELS requests; a nonzero
	 * return means it consumed (or will send) the frame itself. */
	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
	    fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
		return 0;

	/* Prepend the Ethernet (+optional VLAN) and FCoE headers in front
	 * of the FC frame already in the skb. */
	if (!fnic->vlan_hw_insert) {
		eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
		vlan_hdr = skb_push(skb, eth_hdr_len);
		eth_hdr = (struct ethhdr *)vlan_hdr;
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
	} else {
		eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
		eth_hdr = skb_push(skb, eth_hdr_len);
		eth_hdr->h_proto = htons(ETH_P_FCOE);
		fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
	}

	/* Destination MAC: derived from the FC D_ID when map_dest is set,
	 * otherwise the FCF address learned by the FIP controller. */
	if (fnic->ctlr.map_dest)
		fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
	else
		memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
	memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

	tot_len = skb->len;
	/* FC frames are word-aligned; a misaligned length is a driver bug. */
	BUG_ON(tot_len % 4);

	memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
	fcoe_hdr->fcoe_sof = fr_sof(fp);
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

	pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
		ret = -ENOMEM;
		printk(KERN_ERR "DMA map failed with error %d\n", ret);
		goto free_skb_on_err;
	}

	/* Best-effort frame trace; failure only logs. */
	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
				(char *)eth_hdr, tot_len)) != 0) {
		printk(KERN_ERR "fnic ctlr frame trace error!!!");
	}

	spin_lock_irqsave(&fnic->wq_lock[0], flags);

	if (!vnic_wq_desc_avail(wq)) {
		dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE);
		ret = -1;
		goto irq_restore;
	}

	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
			   0 /* hw inserts cos value */,
			   fnic->vlan_id, 1, 1, 1);

	/* Success falls through these labels: ret == 0 skips the free. */
irq_restore:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

free_skb_on_err:
	if (ret)
		dev_kfree_skb_any(fp_skb(fp));

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)  * fnic_send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)  * Routine to send a raw frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	struct fnic *fnic = lport_priv(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	if (fnic->in_remove) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		dev_kfree_skb(fp_skb(fp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	 * Queue frame if in a transitional state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	 * This occurs while registering the Port_ID / MAC address after FLOGI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	spin_lock_irqsave(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	return fnic_send_frame(fnic, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)  * fnic_flush_tx() - send queued frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)  * @fnic: fnic device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)  * Send frames that were waiting to go out in FC or Ethernet mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)  * Whenever changing modes we purge queued frames, so these frames should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)  * be queued for the stable mode that we're in, either FC or Ethernet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)  * Called without fnic_lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) void fnic_flush_tx(struct fnic *fnic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	struct fc_frame *fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	while ((skb = skb_dequeue(&fnic->tx_queue))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		fp = (struct fc_frame *)skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		fnic_send_frame(fnic, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)  * fnic_set_eth_mode() - put fnic into ethernet mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)  * @fnic: fnic device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)  * Called without fnic lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)  */
static void fnic_set_eth_mode(struct fnic *fnic)
{
	unsigned long flags;
	enum fnic_state old_state;
	int ret;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
	old_state = fnic->state;
	switch (old_state) {
	case FNIC_IN_FC_MODE:
	case FNIC_IN_ETH_TRANS_FC_MODE:
	default:
		/* Enter the transitional state, then drop the lock around
		 * the firmware reset (it may sleep / take other locks). */
		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		ret = fnic_fw_reset_handler(fnic);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		/* Someone else changed state while the lock was dropped;
		 * re-evaluate from the top under the lock. */
		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
			goto again;
		/* Reset request failed: roll back to the prior state. */
		if (ret)
			fnic->state = old_state;
		break;

	case FNIC_IN_FC_TRANS_ETH_MODE:
	case FNIC_IN_ETH_MODE:
		/* Already in (or heading to) Ethernet mode; nothing to do. */
		break;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 					struct cq_desc *cq_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 					struct vnic_wq_buf *buf, void *opaque)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	struct sk_buff *skb = buf->os_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	struct fc_frame *fp = (struct fc_frame *)skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	struct fnic *fnic = vnic_dev_priv(wq->vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 			 DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	dev_kfree_skb_irq(fp_skb(fp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	buf->os_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 				     struct cq_desc *cq_desc, u8 type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 				     u16 q_number, u16 completed_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 				     void *opaque)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	struct fnic *fnic = vnic_dev_priv(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 			fnic_wq_complete_frame_send, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	unsigned int wq_work_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	for (i = 0; i < fnic->raw_wq_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		wq_work_done  += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 						 work_to_do,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 						 fnic_wq_cmpl_handler_cont,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 						 NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	return wq_work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	struct fc_frame *fp = buf->os_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	struct fnic *fnic = vnic_dev_priv(wq->vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 			 DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	dev_kfree_skb(fp_skb(fp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	buf->os_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) void fnic_fcoe_reset_vlans(struct fnic *fnic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	struct fcoe_vlan *vlan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	struct fcoe_vlan *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	 * indicate a link down to fcoe so that all fcf's are free'd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	 * might not be required since we did this before sending vlan
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	 * discovery request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	spin_lock_irqsave(&fnic->vlans_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	if (!list_empty(&fnic->vlans)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 			list_del(&vlan->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 			kfree(vlan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) void fnic_handle_fip_timer(struct fnic *fnic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	struct fcoe_vlan *vlan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	u64 sol_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	spin_lock_irqsave(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	if (fnic->stop_rx_link_events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	if (fnic->ctlr.mode == FIP_MODE_NON_FIP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	spin_lock_irqsave(&fnic->vlans_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	if (list_empty(&fnic->vlans)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		/* no vlans available, try again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		if (printk_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 				  "Start VLAN Discovery\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	shost_printk(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		  "fip_timer: vlan %d state %d sol_count %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 		  vlan->vid, vlan->state, vlan->sol_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	switch (vlan->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	case FIP_VLAN_USED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 			  "FIP VLAN is selected for FC transaction\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	case FIP_VLAN_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		/* if all vlans are in failed state, restart vlan disc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		if (printk_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 				  "Start VLAN Discovery\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	case FIP_VLAN_SENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 			 * no response on this vlan, remove  from the list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 			 * Try the next vlan
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 			shost_printk(KERN_INFO, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 				  "Dequeue this VLAN ID %d from list\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 				  vlan->vid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 			list_del(&vlan->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 			kfree(vlan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 			vlan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 			if (list_empty(&fnic->vlans)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 				/* we exhausted all vlans, restart vlan disc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 				spin_unlock_irqrestore(&fnic->vlans_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 							flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 				shost_printk(KERN_INFO, fnic->lport->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 					  "fip_timer: vlan list empty, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 					  "trigger vlan disc\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 				fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 			/* check the next vlan */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 			vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 							list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 			fnic->set_vlan(fnic, vlan->vid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 			vlan->state = FIP_VLAN_SENT; /* sent now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		vlan->sol_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		sol_time = jiffies + msecs_to_jiffies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 					(FCOE_CTLR_START_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 		mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) }