/*
 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <rdma/ib_addr.h>

#include <libcxgb_cm.h>
#include "iw_cxgb4.h"
#include "clip_tbl.h"

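/*
 * Human-readable names for the endpoint states, indexed by
 * enum c4iw_ep_state (see iw_cxgb4.h), so the order here must match
 * the enum definition.
 */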
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)");

uint c4iw_max_read_depth = 32;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth,
		 "Per-connection max ORD/IRD (default=32)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 2;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		"1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		" compliant (default=2)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(struct timer_list *t);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int sched(struct c4iw_dev *dev, struct sk_buff *skb);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

static void deref_cm_id(struct c4iw_ep_common *epc)
{
	epc->cm_id->rem_ref(epc->cm_id);
	epc->cm_id = NULL;
	set_bit(CM_ID_DEREFED, &epc->history);
}

static void ref_cm_id(struct c4iw_ep_common *epc)
{
	set_bit(CM_ID_REFED, &epc->history);
	epc->cm_id->add_ref(epc->cm_id);
}

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_DEREFED, &ep->com.history);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_REFED, &ep->com.history);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

static void start_ep_timer(struct c4iw_ep *ep)
{
	pr_debug("ep %p\n", ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n",
		       __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	add_timer(&ep->timer);
}

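/*
 * Stop the endpoint timer and drop the reference taken by
 * start_ep_timer().  Returns 0 if this call released the timer
 * reference, or 1 if the TIMEOUT flag was already set (the timer
 * already fired or was stopped), in which case no reference is
 * dropped.
 */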
static int stop_ep_timer(struct c4iw_ep *ep)
{
	pr_debug("ep %p stopping\n", ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}

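/*
 * Send an skb via the given L2T entry, dropping it if the device is in
 * a fatal error state.  NET_XMIT_DROP from the lower layer is mapped
 * to -ENOMEM so callers see a plain errno; other positive NET_XMIT_*
 * values are treated as success.
 */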
static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		pr_err("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	else if (error == NET_XMIT_DROP)
		return -ENOMEM;
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		pr_err("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	u32 len = roundup(sizeof(struct cpl_tid_release), 16);

	skb = get_skb(skb, len, GFP_KERNEL);
	if (!skb)
		return;

	cxgb_mk_tid_release(skb, len, hwtid, 0);
	c4iw_ofld_send(rdev, skb);
}

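/*
 * Derive the effective MSS for the connection from the negotiated TCP
 * option word: take the MTU for the firmware mtu index, subtract the
 * IPv4/IPv6 and TCP header sizes, and subtract the timestamp option
 * overhead if timestamps were negotiated.  The result is clamped to a
 * floor of 128 bytes.
 */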
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
		   ((AF_INET == ep->com.remote_addr.ss_family) ?
		    sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
		   sizeof(struct tcphdr);
	ep->mss = ep->emss;
	if (TCPOPT_TSTAMP_G(opt))
		ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
	if (ep->emss < 128)
		ep->emss = 128;
	if (ep->emss & 7)
		pr_debug("Warning: misaligned mtu idx %u mss %u emss=%u\n",
			 TCPOPT_MSS_G(opt), ep->mss, ep->emss);
	pr_debug("mss_idx %u mss %u emss=%u\n", TCPOPT_MSS_G(opt), ep->mss,
		 ep->emss);
}

static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	pr_debug("%s -> %s\n", states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
}

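/*
 * Pre-allocate a pool of skbs on the endpoint, each sized for the
 * largest control-plane work request (union cpl_wr_size), so that
 * close/abort messages can be sent later without a potentially
 * failing allocation at teardown time.
 */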
static int alloc_ep_skb_list(struct sk_buff_head *ep_skb_list, int size)
{
	struct sk_buff *skb;
	unsigned int i;
	size_t len;

	len = roundup(sizeof(union cpl_wr_size), 16);
	for (i = 0; i < size; i++) {
		skb = alloc_skb(len, GFP_KERNEL);
		if (!skb)
			goto fail;
		skb_queue_tail(ep_skb_list, skb);
	}
	return 0;
fail:
	skb_queue_purge(ep_skb_list);
	return -ENOMEM;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		epc->wr_waitp = c4iw_alloc_wr_wait(gfp);
		if (!epc->wr_waitp) {
			kfree(epc);
			epc = NULL;
			goto out;
		}
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(epc->wr_waitp);
	}
	pr_debug("alloc ep %p\n", epc);
out:
	return epc;
}

static void remove_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;

	xa_lock_irqsave(&ep->com.dev->hwtids, flags);
	__xa_erase(&ep->com.dev->hwtids, ep->hwtid);
	if (xa_empty(&ep->com.dev->hwtids))
		wake_up(&ep->com.dev->wait);
	xa_unlock_irqrestore(&ep->com.dev->hwtids, flags);
}

static int insert_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;
	int err;

	xa_lock_irqsave(&ep->com.dev->hwtids, flags);
	err = __xa_insert(&ep->com.dev->hwtids, ep->hwtid, ep, GFP_KERNEL);
	xa_unlock_irqrestore(&ep->com.dev->hwtids, flags);

	return err;
}

/*
 * Atomically lookup the ep ptr given the tid and grab a reference on the ep.
 */
static struct c4iw_ep *get_ep_from_tid(struct c4iw_dev *dev, unsigned int tid)
{
	struct c4iw_ep *ep;
	unsigned long flags;

	xa_lock_irqsave(&dev->hwtids, flags);
	ep = xa_load(&dev->hwtids, tid);
	if (ep)
		c4iw_get_ep(&ep->com);
	xa_unlock_irqrestore(&dev->hwtids, flags);
	return ep;
}

/*
 * Atomically lookup the ep ptr given the stid and grab a reference on the ep.
 */
static struct c4iw_listen_ep *get_ep_from_stid(struct c4iw_dev *dev,
					       unsigned int stid)
{
	struct c4iw_listen_ep *ep;
	unsigned long flags;

	xa_lock_irqsave(&dev->stids, flags);
	ep = xa_load(&dev->stids, stid);
	if (ep)
		c4iw_get_ep(&ep->com);
	xa_unlock_irqrestore(&dev->stids, flags);
	return ep;
}

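/*
 * Final kref release handler for an endpoint: drops the QP reference
 * if one is held; once RELEASE_RESOURCES has been set, releases the
 * CLIP entry (for IPv6), hardware TID, dst and L2T entries; then
 * frees the remaining skbs and the endpoint itself.
 */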
void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	pr_debug("ep %p state %s\n", ep, states[ep->com.state]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		if (ep->com.remote_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)
					&ep->com.local_addr;

			cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)&sin6->sin6_addr.s6_addr,
					1);
		}
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid,
				 ep->com.local_addr.ss_family);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		kfree_skb(ep->mpa_skb);
	}
	if (!skb_queue_empty(&ep->com.ep_skb_list))
		skb_queue_purge(&ep->com.ep_skb_list);
	c4iw_put_wr_wait(ep->com.wr_waitp);
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);

	/*
	 * If we have a hwtid, then remove it from the hwtid xarray
	 * so lookups will no longer find this endpoint. Otherwise
	 * we have a race where one thread finds the ep ptr just
	 * before the other thread is freeing the ep memory.
	 */
	if (ep->hwtid != -1)
		remove_ep_tid(ep);
	c4iw_put_ep(&ep->com);
}

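/*
 * Map a CPL status code reported by the hardware to a negative errno.
 */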
static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (!skb)
			return NULL;
	}
	t4_set_arp_err_handler(skb, NULL, NULL);
	return skb;
}

static struct net_device *get_real_dev(struct net_device *egress_dev)
{
	return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	pr_err("ARP failure\n");
	kfree_skb(skb);
}

static void mpa_start_arp_failure(void *handle, struct sk_buff *skb)
{
	pr_err("ARP failure during MPA Negotiation - Closing Connection\n");
}

enum {
	NUM_FAKE_CPLS = 2,
	FAKE_CPL_PUT_EP_SAFE = NUM_CPL_CMDS + 0,
	FAKE_CPL_PASS_PUT_EP_SAFE = NUM_CPL_CMDS + 1,
};

static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	release_ep_resources(ep);
	return 0;
}

static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	c4iw_put_ep(&ep->parent_ep->com);
	release_ep_resources(ep);
	return 0;
}

/*
 * Fake up a special CPL opcode and call sched() so process_work() will call
 * _put_ep_safe() in a safe context to free the ep resources. This is needed
 * because ARP error handlers are called in an ATOMIC context, and
 * _c4iw_free_ep() needs to block.
 */
static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb,
				  int cpl)
{
	struct cpl_act_establish *rpl = cplhdr(skb);

	/* Set our special ARP_FAILURE opcode */
	rpl->ot.opcode = cpl;

	/*
	 * Save ep in the skb->cb area, after where sched() will save the dev
	 * ptr.
	 */
	*((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep;
	sched(ep->com.dev, skb);
}

/* Handle an ARP failure for an accept */
static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	pr_err("ARP failure during accept - tid %u - dropping connection\n",
	       ep->hwtid);

	__state_set(&ep->com, DEAD);
	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	pr_err("ARP failure during connect\n");
	connect_reply_upcall(ep, -EHOSTUNREACH);
	__state_set(&ep->com, DEAD);
	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	xa_erase_irq(&ep->com.dev->atids, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	int ret;
	struct c4iw_ep *ep = handle;
	struct c4iw_rdev *rdev = &ep->com.dev->rdev;
	struct cpl_abort_req *req = cplhdr(skb);

	pr_debug("rdev %p\n", rdev);
	req->cmd = CPL_ABORT_NO_RST;
	skb_get(skb);
	ret = c4iw_ofld_send(rdev, skb);
	if (ret) {
		__state_set(&ep->com, DEAD);
		queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
	} else {
		kfree_skb(skb);
	}
}

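/*
 * Send the FW_FLOWC_WR for a newly established offloaded connection.
 * It tells the firmware the attributes it needs to schedule the flow:
 * PF/VF, tx channel and port, ingress queue, initial send/receive
 * sequence numbers, send buffer, MSS and receive window scale, plus
 * the VLAN priority when a VLAN is in use (the tenth parameter).
 */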
static int send_flowc(struct c4iw_ep *ep)
{
	struct fw_flowc_wr *flowc;
	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
	u16 vlan = ep->l2t->vlan;
	int nparams;
	int flowclen, flowclen16;

	if (WARN_ON(!skb))
		return -ENOMEM;

	if (vlan == CPL_L2T_VLAN_NONE)
		nparams = 9;
	else
		nparams = 10;

	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;

	flowc = __skb_put(skb, flowclen);
	memset(flowc, 0, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
					  FW_WR_FLOWID_V(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
					    (ep->com.dev->rdev.lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
	flowc->mnemval[8].val = cpu_to_be32(ep->snd_wscale);
	if (nparams == 10) {
		u16 pri;

		pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
		flowc->mnemval[9].val = cpu_to_be32(pri);
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

static int send_halfclose(struct c4iw_ep *ep)
{
	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
	u32 wrlen = roundup(sizeof(struct cpl_close_con_req), 16);

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	if (WARN_ON(!skb))
		return -ENOMEM;

	cxgb_mk_close_con_req(skb, wrlen, ep->hwtid, ep->txq_idx,
			      NULL, arp_failure_discard);

	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

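/*
 * Issue a CPL_GET_TCB to read back the hardware TCB for this
 * connection. The reply arrives asynchronously (handled by
 * read_tcb_rpl()), so a reference is held on the ep until the reply
 * is processed.
 */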
static void read_tcb(struct c4iw_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_get_tcb *req;
	int wrlen = roundup(sizeof(*req), 16);

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (WARN_ON(!skb))
		return;

	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
	req = (struct cpl_get_tcb *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_GET_TCB, ep->hwtid));
	req->reply_ctrl = htons(REPLY_CHAN_V(0) | QUEUENO_V(ep->rss_qid));

	/*
	 * keep a ref on the ep so the tcb is not unlocked before this
	 * cpl completes. The ref is released in read_tcb_rpl().
	 */
	c4iw_get_ep(&ep->com);
	if (WARN_ON(c4iw_ofld_send(&ep->com.dev->rdev, skb)))
		c4iw_put_ep(&ep->com);
}

static int send_abort_req(struct c4iw_ep *ep)
{
	u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16);
	struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	if (WARN_ON(!req_skb))
		return -ENOMEM;

	cxgb_mk_abort_req(req_skb, wrlen, ep->hwtid, ep->txq_idx,
			  ep, abort_arp_failure);

	return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
}

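/*
 * For connections whose QP uses an SRQ, the hardware TCB is read back
 * first (see read_tcb()) so its state can be recovered before the
 * abort completes; otherwise the abort request is sent directly.
 */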
static int send_abort(struct c4iw_ep *ep)
{
	if (!ep->com.qp || !ep->com.qp->srq) {
		send_abort_req(ep);
		return 0;
	}
	set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags);
	read_tcb(ep);
	return 0;
}

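/*
 * Build and send the active-open request for this endpoint. The CPL
 * layout differs by chip generation (T4/T5/T6) and address family
 * (IPv4 vs IPv6), so the appropriately sized request is chosen from
 * the adapter type before the common opt0/opt2 fields are filled in.
 */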
static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req = NULL;
	struct cpl_t5_act_open_req *t5req = NULL;
	struct cpl_t6_act_open_req *t6req = NULL;
	struct cpl_act_open_req6 *req6 = NULL;
	struct cpl_t5_act_open_req6 *t5req6 = NULL;
	struct cpl_t6_act_open_req6 *t6req6 = NULL;
	struct sk_buff *skb;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	u32 wscale;
	int win, sizev4, sizev6, wrlen;
	struct sockaddr_in *la = (struct sockaddr_in *)
				 &ep->com.local_addr;
	struct sockaddr_in *ra = (struct sockaddr_in *)
				 &ep->com.remote_addr;
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
				   &ep->com.local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
				   &ep->com.remote_addr;
	int ret;
	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
	u32 isn = (prandom_u32() & ~7UL) - 1;
	struct net_device *netdev;
	u64 params;

	netdev = ep->com.dev->rdev.lldi.ports[0];

	switch (CHELSIO_CHIP_VERSION(adapter_type)) {
	case CHELSIO_T4:
		sizev4 = sizeof(struct cpl_act_open_req);
		sizev6 = sizeof(struct cpl_act_open_req6);
		break;
	case CHELSIO_T5:
		sizev4 = sizeof(struct cpl_t5_act_open_req);
		sizev6 = sizeof(struct cpl_t5_act_open_req6);
		break;
	case CHELSIO_T6:
		sizev4 = sizeof(struct cpl_t6_act_open_req);
		sizev6 = sizeof(struct cpl_t6_act_open_req6);
		break;
	default:
		pr_err("T%d Chip is not supported\n",
		       CHELSIO_CHIP_VERSION(adapter_type));
		return -EINVAL;
	}

	wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
			roundup(sizev4, 16) :
			roundup(sizev6, 16);

	pr_debug("ep %p atid %u\n", ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		pr_err("%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		      enable_tcp_timestamps,
		      (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
	wscale = cxgb_compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;

	opt0 = (nocong ? NO_CONG_F : 0) |
	       KEEP_ALIVE_F |
	       DELACK_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(ep->l2t->idx) |
	       TX_CHAN_V(ep->tx_chan) |
	       SMAC_SEL_V(ep->smac_idx) |
	       DSCP_V(ep->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ_V(win);
	opt2 = RX_CHANNEL_V(0) |
	       CCTRL_ECN_V(enable_ecn) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN_F;
	if (enable_tcp_sack)
		opt2 |= SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN_F;
	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
		if (peer2peer)
			isn += 4;

		opt2 |= T5_OPT_2_VALID_F;
		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
		opt2 |= T5_ISS_F;
	}

	params = cxgb4_select_ntuple(netdev, ep->l2t);

	if (ep->com.remote_addr.ss_family == AF_INET6)
		cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
			       (const u32 *)&la6->sin6_addr.s6_addr, 1);

	t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);

	if (ep->com.remote_addr.ss_family == AF_INET) {
		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
		case CHELSIO_T4:
			req = skb_put(skb, wrlen);
			INIT_TP_WR(req, 0);
			break;
		case CHELSIO_T5:
			t5req = skb_put(skb, wrlen);
			INIT_TP_WR(t5req, 0);
			req = (struct cpl_act_open_req *)t5req;
			break;
		case CHELSIO_T6:
			t6req = skb_put(skb, wrlen);
			INIT_TP_WR(t6req, 0);
			req = (struct cpl_act_open_req *)t6req;
			t5req = (struct cpl_t5_act_open_req *)t6req;
			break;
		default:
			pr_err("T%d Chip is not supported\n",
			       CHELSIO_CHIP_VERSION(adapter_type));
			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) goto clip_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
((ep->rss_qid << 14) | ep->atid)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) req->local_port = la->sin_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) req->peer_port = ra->sin_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) req->local_ip = la->sin_addr.s_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) req->peer_ip = ra->sin_addr.s_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) req->opt0 = cpu_to_be64(opt0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) req->params = cpu_to_be32(params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) req->opt2 = cpu_to_be32(opt2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) t5req->params =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) cpu_to_be64(FILTER_TUPLE_V(params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) t5req->rsvd = cpu_to_be32(isn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) pr_debug("snd_isn %u\n", t5req->rsvd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) t5req->opt2 = cpu_to_be32(opt2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) t6req->params =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) cpu_to_be64(FILTER_TUPLE_V(params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) t6req->rsvd = cpu_to_be32(isn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) pr_debug("snd_isn %u\n", t6req->rsvd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) t6req->opt2 = cpu_to_be32(opt2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) switch (CHELSIO_CHIP_VERSION(adapter_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) case CHELSIO_T4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) req6 = skb_put(skb, wrlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) INIT_TP_WR(req6, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) case CHELSIO_T5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) t5req6 = skb_put(skb, wrlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) INIT_TP_WR(t5req6, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) req6 = (struct cpl_act_open_req6 *)t5req6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) case CHELSIO_T6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) t6req6 = skb_put(skb, wrlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) INIT_TP_WR(t6req6, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) req6 = (struct cpl_act_open_req6 *)t6req6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) t5req6 = (struct cpl_t5_act_open_req6 *)t6req6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) pr_err("T%d Chip is not supported\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) CHELSIO_CHIP_VERSION(adapter_type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) goto clip_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
OPCODE_TID(req6) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
((ep->rss_qid << 14) | ep->atid)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) req6->local_port = la6->sin6_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) req6->peer_port = ra6->sin6_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) req6->local_ip_hi = *((__be64 *)(la6->sin6_addr.s6_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) req6->local_ip_lo = *((__be64 *)(la6->sin6_addr.s6_addr + 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) req6->peer_ip_hi = *((__be64 *)(ra6->sin6_addr.s6_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) req6->peer_ip_lo = *((__be64 *)(ra6->sin6_addr.s6_addr + 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) req6->opt0 = cpu_to_be64(opt0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) req6->params = cpu_to_be32(cxgb4_select_ntuple(netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) ep->l2t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) req6->opt2 = cpu_to_be32(opt2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) t5req6->params =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) cpu_to_be64(FILTER_TUPLE_V(params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) t5req6->rsvd = cpu_to_be32(isn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) pr_debug("snd_isn %u\n", t5req6->rsvd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) t5req6->opt2 = cpu_to_be32(opt2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) t6req6->params =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) cpu_to_be64(FILTER_TUPLE_V(params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) t6req6->rsvd = cpu_to_be32(isn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) pr_debug("snd_isn %u\n", t6req6->rsvd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) t6req6->opt2 = cpu_to_be32(opt2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) set_bit(ACT_OPEN_REQ, &ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
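/* On failure, drop the CLIP entry taken above for the IPv6 case. */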
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) clip_release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (ret && ep->com.remote_addr.ss_family == AF_INET6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) (const u32 *)&la6->sin6_addr.s6_addr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
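
/*
* Selection sketch (illustrative): on a T5 adapter with an IPv4 peer,
* the active-open path above builds a cpl_t5_act_open_req, roughly:
*
*	wrlen = roundup(sizeof(struct cpl_t5_act_open_req), 16);
*	t5req->params = cpu_to_be64(FILTER_TUPLE_V(params));
*
* whereas T4 keeps the 32-bit req->params word.
*/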
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) u8 mpa_rev_to_use)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) int mpalen, wrlen, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) struct fw_ofld_tx_data_wr *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) struct mpa_message *mpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) struct mpa_v2_conn_params mpa_v2_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) pr_debug("ep %p tid %u pd_len %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) ep, ep->hwtid, ep->plen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) mpalen = sizeof(*mpa) + ep->plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (mpa_rev_to_use == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) mpalen += sizeof(struct mpa_v2_conn_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) wrlen = roundup(mpalen + sizeof(*req), 16);
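/*
* Sizing sketch (illustrative; the struct sizes are assumptions):
* with sizeof(*req) == 16, sizeof(*mpa) == 20, ep->plen == 0 and
* MPA rev 2, mpalen = 20 + 4 = 24 and wrlen = roundup(24 + 16, 16)
* = 48, so FW_WR_LEN16_V(wrlen >> 4) encodes three 16-byte units.
*/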
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) skb = get_skb(skb, wrlen, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) connect_reply_upcall(ep, -ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) req = skb_put_zero(skb, wrlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) req->op_to_immdlen = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) FW_WR_COMPL_F |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) FW_WR_IMMDLEN_V(mpalen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) req->flowid_len16 = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) FW_WR_FLOWID_V(ep->hwtid) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) FW_WR_LEN16_V(wrlen >> 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) req->plen = cpu_to_be32(mpalen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) req->tunnel_to_proxy = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) FW_OFLD_TX_DATA_WR_FLUSH_F |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) FW_OFLD_TX_DATA_WR_SHOVE_F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) mpa = (struct mpa_message *)(req + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) mpa->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (crc_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) mpa->flags |= MPA_CRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (markers_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) mpa->flags |= MPA_MARKERS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) ep->mpa_attr.recv_marker_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) ep->mpa_attr.recv_marker_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (mpa_rev_to_use == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) mpa->flags |= MPA_ENHANCED_RDMA_CONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) mpa->private_data_size = htons(ep->plen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) mpa->revision = mpa_rev_to_use;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (mpa_rev_to_use == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) ep->tried_with_mpa_v1 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) ep->retry_with_mpa_v1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (mpa_rev_to_use == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) mpa->private_data_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) htons(ntohs(mpa->private_data_size) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) sizeof(struct mpa_v2_conn_params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) pr_debug("initiator ird %u ord %u\n", ep->ird,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) ep->ord);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) mpa_v2_params.ird = htons((u16)ep->ird);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) mpa_v2_params.ord = htons((u16)ep->ord);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (peer2peer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) mpa_v2_params.ord |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) htons(MPA_V2_RDMA_WRITE_RTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) mpa_v2_params.ord |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) htons(MPA_V2_RDMA_READ_RTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
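/*
* Encoding example (illustrative): with ep->ird = 8, peer2peer set
* and p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE, the wire carries
* htons(8 | MPA_V2_PEER2PEER_MODEL) in ird and
* htons(ep->ord | MPA_V2_RDMA_WRITE_RTR) in ord.
*/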
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) memcpy(mpa->private_data, &mpa_v2_params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) sizeof(struct mpa_v2_conn_params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) if (ep->plen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) memcpy(mpa->private_data +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) sizeof(struct mpa_v2_conn_params),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) ep->mpa_pkt + sizeof(*mpa), ep->plen);
} else if (ep->plen)
memcpy(mpa->private_data,
ep->mpa_pkt + sizeof(*mpa), ep->plen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * Reference the mpa skb. This ensures the data area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * will remain in memory until the hw acks the tx.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * Function fw4_ack() will deref it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) skb_get(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) ep->mpa_skb = skb;
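/*
* Release sketch (illustrative, assumed): fw4_ack() drops this
* reference once the hardware acks the send, roughly:
*
*	kfree_skb(ep->mpa_skb);
*	ep->mpa_skb = NULL;
*/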
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) start_ep_timer(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) __state_set(&ep->com, MPA_REQ_SENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) ep->mpa_attr.initiator = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) ep->snd_seq += mpalen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
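
/*
* On-the-wire layout built by send_mpa_req() above (illustrative):
*
*	[fw_ofld_tx_data_wr][mpa_message][mpa_v2_conn_params][private data]
*
* where the v2 connection parameters are present only for
* mpa_rev_to_use == 2 and private_data_size covers both trailing pieces.
*/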
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) int mpalen, wrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) struct fw_ofld_tx_data_wr *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) struct mpa_message *mpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) struct mpa_v2_conn_params mpa_v2_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) pr_debug("ep %p tid %u pd_len %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) ep, ep->hwtid, ep->plen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) mpalen = sizeof(*mpa) + plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) mpalen += sizeof(struct mpa_v2_conn_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) wrlen = roundup(mpalen + sizeof(*req), 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) skb = get_skb(NULL, wrlen, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) pr_err("%s - cannot alloc skb!\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) req = skb_put_zero(skb, wrlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) req->op_to_immdlen = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) FW_WR_COMPL_F |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) FW_WR_IMMDLEN_V(mpalen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) req->flowid_len16 = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) FW_WR_FLOWID_V(ep->hwtid) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) FW_WR_LEN16_V(wrlen >> 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) req->plen = cpu_to_be32(mpalen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) req->tunnel_to_proxy = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) FW_OFLD_TX_DATA_WR_FLUSH_F |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) FW_OFLD_TX_DATA_WR_SHOVE_F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) mpa = (struct mpa_message *)(req + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) memset(mpa, 0, sizeof(*mpa));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) mpa->flags = MPA_REJECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) mpa->revision = ep->mpa_attr.version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) mpa->private_data_size = htons(plen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) mpa->flags |= MPA_ENHANCED_RDMA_CONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) mpa->private_data_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) htons(ntohs(mpa->private_data_size) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) sizeof(struct mpa_v2_conn_params));
mpa_v2_params.ird = htons((u16)ep->ird);
mpa_v2_params.ord = htons((u16)ep->ord);
if (peer2peer) {
mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
mpa_v2_params.ord |=
htons(MPA_V2_RDMA_WRITE_RTR);
else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
mpa_v2_params.ord |=
htons(MPA_V2_RDMA_READ_RTR);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) memcpy(mpa->private_data, &mpa_v2_params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) sizeof(struct mpa_v2_conn_params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
if (plen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) memcpy(mpa->private_data +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) sizeof(struct mpa_v2_conn_params), pdata, plen);
} else if (plen)
memcpy(mpa->private_data, pdata, plen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * Reference the mpa skb again. This ensures the data area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) * will remain in memory until the hw acks the tx.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) * Function fw4_ack() will deref it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) skb_get(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) ep->mpa_skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) ep->snd_seq += mpalen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
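
/*
* Caller sketch (illustrative, assumed): the iw_cm reject path drives
* send_mpa_reject() above, roughly:
*
*	c4iw_reject_cr(cm_id, pdata, pdata_len)
*		-> send_mpa_reject(ep, pdata, pdata_len)
*
* after which the endpoint waits for the peer to close the connection.
*/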
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) int mpalen, wrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) struct fw_ofld_tx_data_wr *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) struct mpa_message *mpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) struct mpa_v2_conn_params mpa_v2_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) pr_debug("ep %p tid %u pd_len %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) ep, ep->hwtid, ep->plen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) mpalen = sizeof(*mpa) + plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) mpalen += sizeof(struct mpa_v2_conn_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) wrlen = roundup(mpalen + sizeof(*req), 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) skb = get_skb(NULL, wrlen, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) pr_err("%s - cannot alloc skb!\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) req = skb_put_zero(skb, wrlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) req->op_to_immdlen = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) FW_WR_COMPL_F |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) FW_WR_IMMDLEN_V(mpalen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) req->flowid_len16 = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) FW_WR_FLOWID_V(ep->hwtid) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) FW_WR_LEN16_V(wrlen >> 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) req->plen = cpu_to_be32(mpalen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) req->tunnel_to_proxy = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) FW_OFLD_TX_DATA_WR_FLUSH_F |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) FW_OFLD_TX_DATA_WR_SHOVE_F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) mpa = (struct mpa_message *)(req + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) memset(mpa, 0, sizeof(*mpa));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) mpa->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (ep->mpa_attr.crc_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) mpa->flags |= MPA_CRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) if (ep->mpa_attr.recv_marker_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) mpa->flags |= MPA_MARKERS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) mpa->revision = ep->mpa_attr.version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) mpa->private_data_size = htons(plen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) mpa->flags |= MPA_ENHANCED_RDMA_CONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) mpa->private_data_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) htons(ntohs(mpa->private_data_size) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) sizeof(struct mpa_v2_conn_params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) mpa_v2_params.ird = htons((u16)ep->ird);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) mpa_v2_params.ord = htons((u16)ep->ord);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (peer2peer && (ep->mpa_attr.p2p_type !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) FW_RI_INIT_P2PTYPE_DISABLED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) mpa_v2_params.ord |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) htons(MPA_V2_RDMA_WRITE_RTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) mpa_v2_params.ord |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) htons(MPA_V2_RDMA_READ_RTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) memcpy(mpa->private_data, &mpa_v2_params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) sizeof(struct mpa_v2_conn_params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
if (plen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) memcpy(mpa->private_data +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) sizeof(struct mpa_v2_conn_params), pdata, plen);
} else if (plen)
memcpy(mpa->private_data, pdata, plen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) * Reference the mpa skb. This ensures the data area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * will remain in memory until the hw acks the tx.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * Function fw4_ack() will deref it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) skb_get(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) ep->mpa_skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) __state_set(&ep->com, MPA_REP_SENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) ep->snd_seq += mpalen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) struct c4iw_ep *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) struct cpl_act_establish *req = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) unsigned short tcp_opt = ntohs(req->tcp_opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) unsigned int tid = GET_TID(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) struct tid_info *t = dev->rdev.lldi.tids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) ep = lookup_atid(t, atid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) mutex_lock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) dst_confirm(ep->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) /* setup the hwtid for this connection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) ep->hwtid = tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) cxgb4_insert_tid(t, ep, tid, ep->com.local_addr.ss_family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) insert_ep_tid(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) ep->snd_seq = be32_to_cpu(req->snd_isn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) ep->rcv_seq = be32_to_cpu(req->rcv_isn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) set_emss(ep, tcp_opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) /* dealloc the atid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) xa_erase_irq(&ep->com.dev->atids, atid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) cxgb4_free_atid(t, atid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) set_bit(ACT_ESTAB, &ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) /* start MPA negotiation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) ret = send_flowc(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (ep->retry_with_mpa_v1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) ret = send_mpa_req(ep, skb, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) ret = send_mpa_req(ep, skb, mpa_rev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) mutex_unlock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) mutex_unlock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) connect_reply_upcall(ep, -ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
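
/*
* Active-open timeline (illustrative): CPL_ACT_OPEN_REQ (atid) ->
* CPL_ACT_ESTABLISH (hwtid) -> FW flowc -> MPA request; act_establish()
* above runs once the hardware completes the TCP three-way handshake.
*/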
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) static void close_complete_upcall(struct c4iw_ep *ep, int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) struct iw_cm_event event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) pr_debug("ep %p tid %u\n", ep, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) memset(&event, 0, sizeof(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) event.event = IW_CM_EVENT_CLOSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) event.status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (ep->com.cm_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) pr_debug("close complete delivered ep %p cm_id %p tid %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) ep, ep->com.cm_id, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) ep->com.cm_id->event_handler(ep->com.cm_id, &event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) deref_cm_id(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) set_bit(CLOSE_UPCALL, &ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
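
/*
* Consumer sketch (illustrative; the handler and completion are
* hypothetical): a ULP registered on the cm_id sees the upcall above
* as, e.g.:
*
*	static int ulp_cm_handler(struct iw_cm_id *id,
*				  struct iw_cm_event *ev)
*	{
*		if (ev->event == IW_CM_EVENT_CLOSE)
*			complete(&ulp_close_done);
*		return 0;
*	}
*/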
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) static void peer_close_upcall(struct c4iw_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) struct iw_cm_event event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) pr_debug("ep %p tid %u\n", ep, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) memset(&event, 0, sizeof(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) event.event = IW_CM_EVENT_DISCONNECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (ep->com.cm_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) pr_debug("peer close delivered ep %p cm_id %p tid %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) ep, ep->com.cm_id, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) ep->com.cm_id->event_handler(ep->com.cm_id, &event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) set_bit(DISCONN_UPCALL, &ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) static void peer_abort_upcall(struct c4iw_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) struct iw_cm_event event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) pr_debug("ep %p tid %u\n", ep, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) memset(&event, 0, sizeof(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) event.event = IW_CM_EVENT_CLOSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) event.status = -ECONNRESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (ep->com.cm_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) pr_debug("abort delivered ep %p cm_id %p tid %u\n", ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) ep->com.cm_id, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) ep->com.cm_id->event_handler(ep->com.cm_id, &event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) deref_cm_id(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) set_bit(ABORT_UPCALL, &ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) static void connect_reply_upcall(struct c4iw_ep *ep, int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) struct iw_cm_event event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) pr_debug("ep %p tid %u status %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) ep, ep->hwtid, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) memset(&event, 0, sizeof(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) event.event = IW_CM_EVENT_CONNECT_REPLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) event.status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) memcpy(&event.local_addr, &ep->com.local_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) sizeof(ep->com.local_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) memcpy(&event.remote_addr, &ep->com.remote_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) sizeof(ep->com.remote_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) if ((status == 0) || (status == -ECONNREFUSED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) if (!ep->tried_with_mpa_v1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) /* this means MPA_v2 is used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) event.ord = ep->ird;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) event.ird = ep->ord;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) event.private_data_len = ep->plen -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) sizeof(struct mpa_v2_conn_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) event.private_data = ep->mpa_pkt +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) sizeof(struct mpa_message) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) sizeof(struct mpa_v2_conn_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) /* this means MPA_v1 is used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) event.ord = cur_max_read_depth(ep->com.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) event.ird = cur_max_read_depth(ep->com.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) event.private_data_len = ep->plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) event.private_data = ep->mpa_pkt +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) sizeof(struct mpa_message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
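/*
* Example (illustrative; assumes sizeof(struct mpa_v2_conn_params)
* == 4): for MPA v2, ep->plen still counts the v2 header, so a peer
* that sent 32 bytes of ULP private data arrives here with
* ep->plen = 36 and private_data_len = 32.
*/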
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) pr_debug("ep %p tid %u status %d\n", ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) ep->hwtid, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) set_bit(CONN_RPL_UPCALL, &ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) ep->com.cm_id->event_handler(ep->com.cm_id, &event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) deref_cm_id(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) static int connect_request_upcall(struct c4iw_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) struct iw_cm_event event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) pr_debug("ep %p tid %u\n", ep, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) memset(&event, 0, sizeof(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) event.event = IW_CM_EVENT_CONNECT_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) memcpy(&event.local_addr, &ep->com.local_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) sizeof(ep->com.local_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) memcpy(&event.remote_addr, &ep->com.remote_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) sizeof(ep->com.remote_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) event.provider_data = ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) if (!ep->tried_with_mpa_v1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) /* this means MPA_v2 is used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) event.ord = ep->ord;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) event.ird = ep->ird;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) event.private_data_len = ep->plen -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) sizeof(struct mpa_v2_conn_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) sizeof(struct mpa_v2_conn_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) /* this means MPA_v1 is used. Send max supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) event.ord = cur_max_read_depth(ep->com.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) event.ird = cur_max_read_depth(ep->com.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) event.private_data_len = ep->plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) c4iw_get_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) &event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) set_bit(CONNREQ_UPCALL, &ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) c4iw_put_ep(&ep->parent_ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) static void established_upcall(struct c4iw_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) struct iw_cm_event event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) pr_debug("ep %p tid %u\n", ep, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) memset(&event, 0, sizeof(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) event.event = IW_CM_EVENT_ESTABLISHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) event.ird = ep->ord;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) event.ord = ep->ird;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (ep->com.cm_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) pr_debug("ep %p tid %u\n", ep, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) ep->com.cm_id->event_handler(ep->com.cm_id, &event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) set_bit(ESTAB_UPCALL, &ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) u32 wrlen = roundup(sizeof(struct cpl_rx_data_ack), 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) u32 credit_dack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) pr_debug("ep %p tid %u credits %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) ep, ep->hwtid, credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) skb = get_skb(NULL, wrlen, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (!skb) {
pr_err("%s - cannot alloc skb!\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) * If we couldn't specify the entire rcv window at connection setup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) * due to the limit in the number of bits in the RCV_BUFSIZ field,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) * then add the overage in to the credits returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;
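/*
* Example (illustrative; assumes RCV_BUFSIZ_M == 0x3ff): with
* ep->rcv_win = 2 MB, opt0 advertised only 1023 KB, so 16 KB consumed
* by the ULP comes back as 16384 + (2097152 - 1047552) bytes of
* credit.
*/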
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) credit_dack = credits | RX_FORCE_ACK_F | RX_DACK_CHANGE_F |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) RX_DACK_MODE_V(dack_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) cxgb_mk_rx_data_ack(skb, wrlen, ep->hwtid, ep->ctrlq_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) credit_dack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) c4iw_ofld_send(&ep->com.dev->rdev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) return credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) #define RELAXED_IRD_NEGOTIATION 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
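/*
* Caller sketch (illustrative, assumed): rx_data() maps the return
* value onto the disconnect path, roughly:
*
*	disconnect = process_mpa_reply(ep, skb);
*	if (disconnect)
*		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
*/
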
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) * process_mpa_reply - process streaming mode MPA reply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) * 0 upon success indicating a connect request was delivered to the ULP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) * or the mpa request is incomplete but valid so far.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) * 1 if a failure requires the caller to close the connection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) * 2 if a failure requires the caller to abort the connection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) struct mpa_message *mpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) struct mpa_v2_conn_params *mpa_v2_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) u16 plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) u16 resp_ird, resp_ord;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) u8 rtr_mismatch = 0, insuff_ird = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) struct c4iw_qp_attributes attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) enum c4iw_qp_attr_mask mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) int disconnect = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) pr_debug("ep %p tid %u\n", ep, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) * If we get more than the supported amount of private data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) * then we must fail this connection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) goto err_stop_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) * copy the new data into our accumulation buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) ep->mpa_pkt_len += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) * if we don't even have the mpa message, then bail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) if (ep->mpa_pkt_len < sizeof(*mpa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) mpa = (struct mpa_message *) ep->mpa_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) /* Validate MPA header. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) if (mpa->revision > mpa_rev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) pr_err("%s MPA version mismatch. Local = %d, Received = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) __func__, mpa_rev, mpa->revision);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) err = -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) goto err_stop_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) err = -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) goto err_stop_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) plen = ntohs(mpa->private_data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) * Fail if there's too much private data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (plen > MPA_MAX_PRIVATE_DATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) err = -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) goto err_stop_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
/*
* Fail if we have received more data than plen accounts for.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) err = -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) goto err_stop_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) ep->plen = (u8) plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
/*
* If we don't have all the pdata yet, then bail.
* We'll continue processing when more data arrives.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) if (mpa->flags & MPA_REJECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) err = -ECONNREFUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) goto err_stop_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) * Stop mpa timer. If it expired, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) * we ignore the MPA reply. process_timeout()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) * will abort the connection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) if (stop_ep_timer(ep))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) * If we get here we have accumulated the entire mpa
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) * start reply message including private data. And
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) * the MPA header is valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) __state_set(&ep->com, FPDU_MODE);
ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) ep->mpa_attr.version = mpa->revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (mpa->revision == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) ep->mpa_attr.enhanced_rdma_conn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) if (ep->mpa_attr.enhanced_rdma_conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) mpa_v2_params = (struct mpa_v2_conn_params *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) (ep->mpa_pkt + sizeof(*mpa));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) resp_ird = ntohs(mpa_v2_params->ird) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) MPA_V2_IRD_ORD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) resp_ord = ntohs(mpa_v2_params->ord) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) MPA_V2_IRD_ORD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) pr_debug("responder ird %u ord %u ep ird %u ord %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) resp_ird, resp_ord, ep->ird, ep->ord);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
/*
* This is a double-check. Ideally, the checks below are not
* required since the ird/ord negotiation has already been
* handled in c4iw_accept_cr().
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) if (ep->ird < resp_ord) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) if (RELAXED_IRD_NEGOTIATION && resp_ord <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) ep->com.dev->rdev.lldi.max_ordird_qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) ep->ird = resp_ord;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) insuff_ird = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) } else if (ep->ird > resp_ord) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) ep->ird = resp_ord;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (ep->ord > resp_ird) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) if (RELAXED_IRD_NEGOTIATION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) ep->ord = resp_ird;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) insuff_ird = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
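			/*
			 * Negotiation could not satisfy the requested
			 * IRD/ORD: adopt the responder's values anyway so
			 * the TERM path below reports consistent attributes,
			 * and surface -ENOMEM to the ULP.
			 */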
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) if (insuff_ird) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) ep->ird = resp_ord;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) ep->ord = resp_ird;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) if (ntohs(mpa_v2_params->ird) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) MPA_V2_PEER2PEER_MODEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) if (ntohs(mpa_v2_params->ord) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) MPA_V2_RDMA_WRITE_RTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) ep->mpa_attr.p2p_type =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) FW_RI_INIT_P2PTYPE_RDMA_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) else if (ntohs(mpa_v2_params->ord) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) MPA_V2_RDMA_READ_RTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) ep->mpa_attr.p2p_type =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) FW_RI_INIT_P2PTYPE_READ_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) }
	} else if (mpa->revision == 1) {
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) ep->mpa_attr.crc_enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) ep->mpa_attr.recv_marker_enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) ep->mpa_attr.p2p_type, p2p_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
	/*
	 * If the responder's RTR does not match that of the initiator,
	 * assign FW_RI_INIT_P2PTYPE_DISABLED in the mpa attributes so that
	 * no RTR is generated when moving the QP to the RTS state.
	 * A TERM message will be sent after the QP has moved to RTS.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) if ((ep->mpa_attr.version == 2) && peer2peer &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) (ep->mpa_attr.p2p_type != p2p_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) rtr_mismatch = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) attrs.mpa_attr = ep->mpa_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) attrs.max_ird = ep->ird;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) attrs.max_ord = ep->ord;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) attrs.llp_stream_handle = ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) attrs.next_state = C4IW_QP_STATE_RTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) mask = C4IW_QP_ATTR_NEXT_STATE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) /* bind QP and TID with INIT_WR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) err = c4iw_modify_qp(ep->com.qp->rhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) ep->com.qp, mask, &attrs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
	/*
	 * If the responder's RTR requirement did not match what the
	 * initiator supports, generate a TERM message.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) if (rtr_mismatch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) pr_err("%s: RTR mismatch, sending TERM\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) attrs.layer_etype = LAYER_MPA | DDP_LLP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) attrs.ecode = MPA_NOMATCH_RTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) attrs.next_state = C4IW_QP_STATE_TERMINATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) attrs.send_term = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) disconnect = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
	/*
	 * Generate a TERM if the initiator's IRD is not sufficient for the
	 * responder-provided ORD. We currently behave the same way when the
	 * responder-provided IRD is insufficient for the initiator's ORD.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) if (insuff_ird) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) pr_err("%s: Insufficient IRD, sending TERM\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) attrs.layer_etype = LAYER_MPA | DDP_LLP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) attrs.ecode = MPA_INSUFF_IRD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) attrs.next_state = C4IW_QP_STATE_TERMINATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) attrs.send_term = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) disconnect = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) err_stop_timer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) stop_ep_timer(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) disconnect = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) connect_reply_upcall(ep, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) return disconnect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) * process_mpa_request - process streaming mode MPA request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) * 0 upon success indicating a connect request was delivered to the ULP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) * or the mpa request is incomplete but valid so far.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) * 1 if a failure requires the caller to close the connection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) * 2 if a failure requires the caller to abort the connection.
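 *
 * Called from rx_data() with ep->com.mutex held.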
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) struct mpa_message *mpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) struct mpa_v2_conn_params *mpa_v2_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) u16 plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) pr_debug("ep %p tid %u\n", ep, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) * If we get more than the supported amount of private data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) * then we must fail this connection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) goto err_stop_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) pr_debug("enter (%s line %u)\n", __FILE__, __LINE__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) * Copy the new data into our accumulation buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) ep->mpa_pkt_len += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) /*
 * If we don't even have the mpa message, then bail.
 * We'll continue processing when more data arrives.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) if (ep->mpa_pkt_len < sizeof(*mpa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) pr_debug("enter (%s line %u)\n", __FILE__, __LINE__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) mpa = (struct mpa_message *) ep->mpa_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) * Validate MPA Header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) if (mpa->revision > mpa_rev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) pr_err("%s MPA version mismatch. Local = %d, Received = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) __func__, mpa_rev, mpa->revision);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) goto err_stop_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) goto err_stop_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) plen = ntohs(mpa->private_data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) * Fail if there's too much private data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) if (plen > MPA_MAX_PRIVATE_DATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) goto err_stop_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)
	/*
	 * Fail if plen does not account for all of the received packet data.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) goto err_stop_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) ep->plen = (u8) plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) * If we don't have all the pdata yet, then bail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
	/*
	 * If we get here, we have accumulated the entire MPA start request
	 * message, including any private data.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) ep->mpa_attr.recv_marker_enabled = markers_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) ep->mpa_attr.version = mpa->revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) if (mpa->revision == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) ep->tried_with_mpa_v1 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) if (mpa->revision == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) ep->mpa_attr.enhanced_rdma_conn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) if (ep->mpa_attr.enhanced_rdma_conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) mpa_v2_params = (struct mpa_v2_conn_params *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) (ep->mpa_pkt + sizeof(*mpa));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) ep->ird = ntohs(mpa_v2_params->ird) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) MPA_V2_IRD_ORD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) ep->ird = min_t(u32, ep->ird,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) cur_max_read_depth(ep->com.dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) ep->ord = ntohs(mpa_v2_params->ord) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) MPA_V2_IRD_ORD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) ep->ord = min_t(u32, ep->ord,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) cur_max_read_depth(ep->com.dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) pr_debug("initiator ird %u ord %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) ep->ird, ep->ord);
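			/*
			 * Honour the peer's RTR preference only when this
			 * node also has peer2peer mode enabled.
			 */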
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) if (peer2peer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) if (ntohs(mpa_v2_params->ord) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) MPA_V2_RDMA_WRITE_RTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) ep->mpa_attr.p2p_type =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) FW_RI_INIT_P2PTYPE_RDMA_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) else if (ntohs(mpa_v2_params->ord) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) MPA_V2_RDMA_READ_RTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) ep->mpa_attr.p2p_type =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) FW_RI_INIT_P2PTYPE_READ_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) }
	} else if (mpa->revision == 1) {
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) ep->mpa_attr.p2p_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) __state_set(&ep->com, MPA_REQ_RCVD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) /* drive upcall */
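	/*
	 * The parent and child endpoint mutexes share a lock class, so
	 * take the parent's with SINGLE_DEPTH_NESTING to keep lockdep
	 * happy about the nested acquisition.
	 */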
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) mutex_lock_nested(&ep->parent_ep->com.mutex, SINGLE_DEPTH_NESTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) if (ep->parent_ep->com.state != DEAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) if (connect_request_upcall(ep))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) goto err_unlock_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) goto err_unlock_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) mutex_unlock(&ep->parent_ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) err_unlock_parent:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) mutex_unlock(&ep->parent_ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) err_stop_timer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) (void)stop_ep_timer(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) return 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) struct c4iw_ep *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) struct cpl_rx_data *hdr = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) unsigned int dlen = ntohs(hdr->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) unsigned int tid = GET_TID(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) __u8 status = hdr->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) int disconnect = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) ep = get_ep_from_tid(dev, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) if (!ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) pr_debug("ep %p tid %u dlen %u\n", ep, ep->hwtid, dlen);
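	/*
	 * The CPL header is not part of the MPA stream: drop it and trim
	 * off any padding beyond the payload length reported by hardware.
	 */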
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) skb_pull(skb, sizeof(*hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) skb_trim(skb, dlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) mutex_lock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) switch (ep->com.state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) case MPA_REQ_SENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) update_rx_credits(ep, dlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) ep->rcv_seq += dlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) disconnect = process_mpa_reply(ep, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) case MPA_REQ_WAIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) update_rx_credits(ep, dlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) ep->rcv_seq += dlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) disconnect = process_mpa_request(ep, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) case FPDU_MODE: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) struct c4iw_qp_attributes attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) update_rx_credits(ep, dlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) if (status)
			pr_err("%s Unexpected streaming data. qpid %u ep %p state %d tid %u status %d\n",
			       __func__, ep->com.qp->wq.sq.qid, ep,
			       ep->com.state, ep->hwtid, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) attrs.next_state = C4IW_QP_STATE_TERMINATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) disconnect = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) mutex_unlock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) if (disconnect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) static void complete_cached_srq_buffers(struct c4iw_ep *ep, u32 srqidx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) enum chip_type adapter_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) adapter_type = ep->com.dev->rdev.lldi.adapter_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
	/*
	 * If this TCB had an SRQ buffer cached, then we must complete it.
	 * For user mode, that means saving the srqidx in the user/kernel
	 * status page for this qp. For kernel mode, just synthesize the
	 * CQE now.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T5 && srqidx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) if (ep->com.qp->ibqp.uobject)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) t4_set_wq_in_error(&ep->com.qp->wq, srqidx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) c4iw_flush_srqidx(ep->com.qp, srqidx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) u32 srqidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) struct c4iw_ep *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) struct cpl_abort_rpl_rss6 *rpl = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) int release = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) unsigned int tid = GET_TID(rpl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) ep = get_ep_from_tid(dev, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (!ep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) pr_warn("Abort rpl to freed endpoint\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)
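	/*
	 * If the ABORT_RPL did not carry an SRQ index, fall back to the
	 * srqe_idx cached on the endpoint.
	 */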
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) if (ep->com.qp && ep->com.qp->srq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) srqidx = ABORT_RSS_SRQIDX_G(be32_to_cpu(rpl->srqidx_status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) complete_cached_srq_buffers(ep, srqidx ? srqidx : ep->srqe_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) pr_debug("ep %p tid %u\n", ep, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) mutex_lock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) switch (ep->com.state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) case ABORTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) __state_set(&ep->com, DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) release = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) pr_err("%s ep %p state %d\n", __func__, ep, ep->com.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) mutex_unlock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) if (release) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) close_complete_upcall(ep, -ECONNRESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) release_ep_resources(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) struct fw_ofld_connection_wr *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) unsigned int mtu_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) u32 wscale;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) struct sockaddr_in *sin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) int win;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;	/* allocation can fail under memory pressure */
	req = __skb_put_zero(skb, sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) ep->com.dev->rdev.lldi.ports[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) ep->l2t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) sin = (struct sockaddr_in *)&ep->com.local_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) req->le.lport = sin->sin_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) req->le.u.ipv4.lip = sin->sin_addr.s_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) sin = (struct sockaddr_in *)&ep->com.remote_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) req->le.pport = sin->sin_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) req->le.u.ipv4.pip = sin->sin_addr.s_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) req->tcb.t_state_to_astid =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) FW_OFLD_CONNECTION_WR_ASTID_V(atid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) req->tcb.cplrxdataack_cplpassacceptrpl =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) req->tcb.tx_max = (__force __be32) jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) req->tcb.rcv_adv = htons(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) enable_tcp_timestamps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) wscale = cxgb_compute_wscale(rcv_win);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) * Specify the largest window that will fit in opt0. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) * remainder will be specified in the rx_data_ack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) win = ep->rcv_win >> 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) if (win > RCV_BUFSIZ_M)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) win = RCV_BUFSIZ_M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
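	/*
	 * opt0/opt2 are assembled host-endian first and only converted to
	 * big-endian just before sending, hence the __force casts that
	 * keep sparse quiet in the meantime.
	 */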
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) (nocong ? NO_CONG_F : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) KEEP_ALIVE_F |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) DELACK_F |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) WND_SCALE_V(wscale) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) MSS_IDX_V(mtu_idx) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) L2T_IDX_V(ep->l2t->idx) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) TX_CHAN_V(ep->tx_chan) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) SMAC_SEL_V(ep->smac_idx) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) DSCP_V(ep->tos >> 2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) ULP_MODE_V(ULP_MODE_TCPDDP) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) RCV_BUFSIZ_V(win));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) req->tcb.opt2 = (__force __be32) (PACE_V(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) RX_CHANNEL_V(0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) CCTRL_ECN_V(enable_ecn) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) if (enable_tcp_timestamps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) if (enable_tcp_sack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) req->tcb.opt2 |= (__force __be32)SACK_EN_F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) if (wscale && enable_tcp_window_scaling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) set_bit(ACT_OFLD_CONN, &ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)
/*
 * Some of the error codes above implicitly indicate that no TID was
 * allocated as a result of the ACT_OPEN. We use this predicate to make
 * that explicit.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) static inline int act_open_has_tid(int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) return (status != CPL_ERR_TCAM_PARITY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) status != CPL_ERR_TCAM_MISS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) status != CPL_ERR_TCAM_FULL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) status != CPL_ERR_CONN_EXIST_SYNRECV &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) status != CPL_ERR_CONN_EXIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
static const char *neg_adv_str(unsigned int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) case CPL_ERR_RTX_NEG_ADVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) return "Retransmit timeout";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) case CPL_ERR_PERSIST_NEG_ADVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) return "Persist timeout";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) case CPL_ERR_KEEPALV_NEG_ADVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) return "Keepalive timeout";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) return "Unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) ep->snd_win = snd_win;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) ep->rcv_win = rcv_win;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) pr_debug("snd_win %d rcv_win %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) ep->snd_win, ep->rcv_win);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) #define ACT_OPEN_RETRY_COUNT 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) struct dst_entry *dst, struct c4iw_dev *cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) bool clear_mpa_v1, enum chip_type adapter_type, u8 tos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) struct neighbour *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) int err, step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) struct net_device *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) n = dst_neigh_lookup(dst, peer_ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) if (!n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) err = -ENOMEM;
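	/*
	 * Loopback destinations need the real ingress netdev looked up by
	 * address; everything else can use the neighbour's device directly.
	 */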
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) if (n->dev->flags & IFF_LOOPBACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) if (iptype == 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) else if (IS_ENABLED(CONFIG_IPV6))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) for_each_netdev(&init_net, pdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) if (ipv6_chk_addr(&init_net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) (struct in6_addr *)peer_ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) pdev, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) pdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) if (!pdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) n, pdev, rt_tos2priority(tos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) if (!ep->l2t) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) dev_put(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) ep->mtu = pdev->mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) ep->tx_chan = cxgb4_port_chan(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) step = cdev->rdev.lldi.ntxq /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) cdev->rdev.lldi.nchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) ep->txq_idx = cxgb4_port_idx(pdev) * step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) step = cdev->rdev.lldi.nrxq /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) cdev->rdev.lldi.nchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) ep->ctrlq_idx = cxgb4_port_idx(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) ep->rss_qid = cdev->rdev.lldi.rxq_ids[
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) cxgb4_port_idx(pdev) * step];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) dev_put(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) pdev = get_real_dev(n->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) n, pdev, rt_tos2priority(tos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) if (!ep->l2t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) ep->mtu = dst_mtu(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) ep->tx_chan = cxgb4_port_chan(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) step = cdev->rdev.lldi.ntxq /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) cdev->rdev.lldi.nchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) ep->txq_idx = cxgb4_port_idx(pdev) * step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) ep->ctrlq_idx = cxgb4_port_idx(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) step = cdev->rdev.lldi.nrxq /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) cdev->rdev.lldi.nchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) ep->rss_qid = cdev->rdev.lldi.rxq_ids[
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) cxgb4_port_idx(pdev) * step];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) if (clear_mpa_v1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) ep->retry_with_mpa_v1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) ep->tried_with_mpa_v1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) neigh_release(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) static int c4iw_reconnect(struct c4iw_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) int size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) struct sockaddr_in *laddr = (struct sockaddr_in *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) &ep->com.cm_id->m_local_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) struct sockaddr_in *raddr = (struct sockaddr_in *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) &ep->com.cm_id->m_remote_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) &ep->com.cm_id->m_local_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) &ep->com.cm_id->m_remote_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) int iptype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) __u8 *ra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) pr_debug("qp %p cm_id %p\n", ep->com.qp, ep->com.cm_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) c4iw_init_wr_wait(ep->com.wr_waitp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)
	/* When the MPA revision differs between nodes, the node with
	 * MPA_rev=2 retries the connection with MPA_rev 1 for the same EP
	 * through c4iw_reconnect(), where the same EP is assigned a new tid
	 * for further connection establishment. Because we reuse the same
	 * EP pointer, some skbs were consumed during the previous
	 * c4iw_connect(), leaving the EP with too few skbs for a further
	 * c4iw_reconnect() and causing a crash due to an empty skb list in
	 * peer_abort(). Re-allocate the skbs that were already consumed.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) goto fail1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) * Allocate an active TID to initiate a TCP connection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) if (ep->atid == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) pr_err("%s - cannot alloc atid\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) goto fail2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) err = xa_insert_irq(&ep->com.dev->atids, ep->atid, ep, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) goto fail2a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) /* find a route */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) ep->dst = cxgb_find_route(&ep->com.dev->rdev.lldi, get_real_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) laddr->sin_addr.s_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) raddr->sin_addr.s_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) laddr->sin_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) raddr->sin_port, ep->com.cm_id->tos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) iptype = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) ra = (__u8 *)&raddr->sin_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) ep->dst = cxgb_find_route6(&ep->com.dev->rdev.lldi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) get_real_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) laddr6->sin6_addr.s6_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) raddr6->sin6_addr.s6_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) laddr6->sin6_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) raddr6->sin6_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) ep->com.cm_id->tos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) raddr6->sin6_scope_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) iptype = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) ra = (__u8 *)&raddr6->sin6_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) if (!ep->dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) pr_err("%s - cannot find route\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) err = -EHOSTUNREACH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) goto fail3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) ep->com.dev->rdev.lldi.adapter_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) ep->com.cm_id->tos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) pr_err("%s - cannot alloc l2e\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) goto fail4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) ep->l2t->idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) state_set(&ep->com, CONNECTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) ep->tos = ep->com.cm_id->tos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) /* send connect request to rnic */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) err = send_connect(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) cxgb4_l2t_release(ep->l2t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) fail4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) dst_release(ep->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) fail3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) xa_erase_irq(&ep->com.dev->atids, ep->atid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) fail2a:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) fail2:
	/*
	 * Remember to send a notification to the upper layer.
	 * If we are here, the upper layer is not aware that this is a
	 * reconnect attempt and is still waiting for the response to its
	 * first connect request.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) connect_reply_upcall(ep, -ECONNRESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) fail1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) struct c4iw_ep *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) struct cpl_act_open_rpl *rpl = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) unsigned int atid = TID_TID_G(AOPEN_ATID_G(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) ntohl(rpl->atid_status)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) struct tid_info *t = dev->rdev.lldi.tids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) struct sockaddr_in *la;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) struct sockaddr_in *ra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) struct sockaddr_in6 *la6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) struct sockaddr_in6 *ra6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) ep = lookup_atid(t, atid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) la = (struct sockaddr_in *)&ep->com.local_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) ra = (struct sockaddr_in *)&ep->com.remote_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) pr_debug("ep %p atid %u status %u errno %d\n", ep, atid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) status, status2errno(status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) if (cxgb_is_neg_adv(status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) pr_debug("Connection problems for atid %u status %u (%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) atid, status, neg_adv_str(status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) ep->stats.connect_neg_adv++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) mutex_lock(&dev->rdev.stats.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) dev->rdev.stats.neg_adv++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) mutex_unlock(&dev->rdev.stats.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) set_bit(ACT_OPEN_RPL, &ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) * Log interesting failures.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) case CPL_ERR_CONN_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) case CPL_ERR_CONN_TIMEDOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) case CPL_ERR_TCAM_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) mutex_lock(&dev->rdev.stats.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) dev->rdev.stats.tcam_full++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) mutex_unlock(&dev->rdev.stats.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) if (ep->com.local_addr.ss_family == AF_INET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) dev->rdev.lldi.enable_fw_ofld_conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) ret = send_fw_act_open_req(ep, TID_TID_G(AOPEN_ATID_G(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) ntohl(rpl->atid_status))));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) break;
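	/*
	 * The four-tuple already exists, most likely because a previous
	 * incarnation of the connection is still winding down. Release
	 * this attempt's resources and retry through c4iw_reconnect(), a
	 * bounded number of times.
	 */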
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) case CPL_ERR_CONN_EXIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) set_bit(ACT_RETRY_INUSE, &ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) if (ep->com.remote_addr.ss_family == AF_INET6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) struct sockaddr_in6 *sin6 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) (struct sockaddr_in6 *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) &ep->com.local_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) cxgb4_clip_release(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) ep->com.dev->rdev.lldi.ports[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) (const u32 *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) &sin6->sin6_addr.s6_addr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) xa_erase_irq(&ep->com.dev->atids, atid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) cxgb4_free_atid(t, atid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) dst_release(ep->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) cxgb4_l2t_release(ep->l2t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) c4iw_reconnect(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) if (ep->com.local_addr.ss_family == AF_INET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) atid, status, status2errno(status),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) &la->sin_addr.s_addr, ntohs(la->sin_port),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) &ra->sin_addr.s_addr, ntohs(ra->sin_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) atid, status, status2errno(status),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) la6->sin6_addr.s6_addr, ntohs(la6->sin6_port),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) connect_reply_upcall(ep, status2errno(status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) state_set(&ep->com, DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) if (ep->com.remote_addr.ss_family == AF_INET6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) struct sockaddr_in6 *sin6 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) (struct sockaddr_in6 *)&ep->com.local_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) (const u32 *)&sin6->sin6_addr.s6_addr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) if (status && act_open_has_tid(status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) ep->com.local_addr.ss_family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) xa_erase_irq(&ep->com.dev->atids, atid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) cxgb4_free_atid(t, atid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) dst_release(ep->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) cxgb4_l2t_release(ep->l2t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
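/*
 * Handle CPL_PASS_OPEN_RPL: the adapter's reply to a listen request.
 * Wake the thread waiting on the listening endpoint's wr_waitp with
 * the translated status, then drop the stid lookup reference.
 */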
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) struct cpl_pass_open_rpl *rpl = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) unsigned int stid = GET_TID(rpl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) if (!ep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) pr_warn("%s stid %d lookup failure!\n", __func__, stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) pr_debug("ep %p status %d error %d\n", ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) rpl->status, status2errno(rpl->status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394)
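/*
 * Handle CPL_CLOSE_LISTSRV_RPL: the adapter's reply to a close-listen
 * request.  Wake the thread waiting on the listening endpoint's
 * wr_waitp with the translated status.
 */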
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) unsigned int stid = GET_TID(rpl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) if (!ep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) pr_warn("%s stid %d lookup failure!\n", __func__, stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) pr_debug("ep %p\n", ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411)
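/*
 * Build and send the CPL_PASS_ACCEPT_RPL for an incoming connection:
 * pick an MSS index and window scale, assemble opt0/opt2 (timestamps,
 * SACK, window scaling, ECN, and on T5+ the congestion algorithm and
 * initial sequence number), then transmit the reply via the L2T entry.
 */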
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) struct cpl_pass_accept_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) struct cpl_pass_accept_rpl *rpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) unsigned int mtu_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) u64 opt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) u32 opt2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) u32 wscale;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) int win;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) pr_debug("ep %p tid %u\n", ep, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) enable_tcp_timestamps && req->tcpopt.tstamp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) wscale = cxgb_compute_wscale(rcv_win);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) * Specify the largest window that will fit in opt0. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) * remainder will be specified in the rx_data_ack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) win = ep->rcv_win >> 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) if (win > RCV_BUFSIZ_M)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) win = RCV_BUFSIZ_M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) opt0 = (nocong ? NO_CONG_F : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) KEEP_ALIVE_F |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) DELACK_F |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) WND_SCALE_V(wscale) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) MSS_IDX_V(mtu_idx) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) L2T_IDX_V(ep->l2t->idx) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) TX_CHAN_V(ep->tx_chan) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) SMAC_SEL_V(ep->smac_idx) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) DSCP_V(ep->tos >> 2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) ULP_MODE_V(ULP_MODE_TCPDDP) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) RCV_BUFSIZ_V(win);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) opt2 = RX_CHANNEL_V(0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) if (enable_tcp_timestamps && req->tcpopt.tstamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) opt2 |= TSTAMPS_EN_F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) if (enable_tcp_sack && req->tcpopt.sack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) opt2 |= SACK_EN_F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) if (wscale && enable_tcp_window_scaling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) opt2 |= WND_SCALE_EN_F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) if (enable_ecn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) const struct tcphdr *tcph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) u32 hlen = ntohl(req->hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) if (CHELSIO_CHIP_VERSION(adapter_type) <= CHELSIO_T5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) IP_HDR_LEN_G(hlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) tcph = (const void *)(req + 1) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) if (tcph->ece && tcph->cwr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) opt2 |= CCTRL_ECN_V(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) skb_get(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) rpl = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) if (!is_t4(adapter_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) skb_trim(skb, roundup(sizeof(*rpl5), 16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) rpl5 = (void *)rpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) INIT_TP_WR(rpl5, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) skb_trim(skb, sizeof(*rpl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) INIT_TP_WR(rpl, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) ep->hwtid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
		u32 isn = (prandom_u32() & ~7UL) - 1;

		opt2 |= T5_OPT_2_VALID_F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) opt2 |= T5_ISS_F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) rpl5 = (void *)rpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) if (peer2peer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) isn += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) rpl5->iss = cpu_to_be32(isn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) pr_debug("iss %u\n", be32_to_cpu(rpl5->iss));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) rpl->opt0 = cpu_to_be64(opt0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) rpl->opt2 = cpu_to_be32(opt2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504)
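/*
 * Reject an incoming connection request by recycling the request skb
 * into a CPL_TID_RELEASE, freeing the hardware tid.
 */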
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) pr_debug("c4iw_dev %p tid %u\n", dev, hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) skb_trim(skb, sizeof(struct cpl_tid_release));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) release_tid(&dev->rdev, hwtid, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512)
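/*
 * Handle CPL_PASS_ACCEPT_REQ: a SYN arrived on one of our listening
 * endpoints.  Find a route back to the peer, allocate and initialize
 * a child endpoint, insert it by hardware tid, and send the accept
 * reply.  Failures are answered by releasing the tid via reject_cr().
 */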
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) struct c4iw_ep *child_ep = NULL, *parent_ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) struct cpl_pass_accept_req *req = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) struct tid_info *t = dev->rdev.lldi.tids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) unsigned int hwtid = GET_TID(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) struct dst_entry *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) __u8 local_ip[16], peer_ip[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) __be16 local_port, peer_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) struct sockaddr_in6 *sin6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) u16 peer_mss = ntohs(req->tcpopt.mss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) int iptype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) unsigned short hdrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) u8 tos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) if (!parent_ep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) pr_err("%s connect request on invalid stid %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) __func__, stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) goto reject;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) if (state_read(&parent_ep->com) != LISTEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) pr_err("%s - listening ep not in LISTEN\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) goto reject;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) if (parent_ep->com.cm_id->tos_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) tos = parent_ep->com.cm_id->tos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) cxgb_get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) &iptype, local_ip, peer_ip, &local_port, &peer_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) /* Find output route */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) if (iptype == 4) {
		pr_debug("parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n",
			 parent_ep, hwtid, local_ip, peer_ip,
			 ntohs(local_port), ntohs(peer_port), peer_mss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) *(__be32 *)local_ip, *(__be32 *)peer_ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) local_port, peer_port, tos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) } else {
		pr_debug("parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n",
			 parent_ep, hwtid, local_ip, peer_ip,
			 ntohs(local_port), ntohs(peer_port), peer_mss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) local_ip, peer_ip, local_port, peer_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) tos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) ((struct sockaddr_in6 *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) &parent_ep->com.local_addr)->sin6_scope_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) if (!dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) pr_err("%s - failed to find dst entry!\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) goto reject;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) if (!child_ep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) pr_err("%s - failed to allocate ep entry!\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) dst_release(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) goto reject;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) err = import_ep(child_ep, iptype, peer_ip, dst, dev, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) parent_ep->com.dev->rdev.lldi.adapter_type, tos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) pr_err("%s - failed to allocate l2t entry!\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) dst_release(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) kfree(child_ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) goto reject;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) sizeof(struct tcphdr) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) child_ep->mtu = peer_mss + hdrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) skb_queue_head_init(&child_ep->com.ep_skb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) if (alloc_ep_skb_list(&child_ep->com.ep_skb_list, CN_MAX_CON_BUF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) state_set(&child_ep->com, CONNECTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) child_ep->com.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) child_ep->com.cm_id = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604)
	if (iptype == 4) {
		struct sockaddr_in *sin = (struct sockaddr_in *)
			&child_ep->com.local_addr;

		sin->sin_family = AF_INET;
		/* The child endpoint inherits the listener's local port. */
		sin->sin_port = ((struct sockaddr_in *)
				 &parent_ep->com.local_addr)->sin_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
		sin->sin_family = AF_INET;
		sin->sin_port = peer_port;
		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
	} else {
		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
		sin6->sin6_family = AF_INET6;
		/* The child endpoint inherits the listener's local port. */
		sin6->sin6_port = ((struct sockaddr_in6 *)
				   &parent_ep->com.local_addr)->sin6_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);

		sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = peer_port;
		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) c4iw_get_ep(&parent_ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) child_ep->parent_ep = parent_ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) child_ep->tos = tos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) child_ep->dst = dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) child_ep->hwtid = hwtid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) pr_debug("tx_chan %u smac_idx %u rss_qid %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) timer_setup(&child_ep->timer, ep_timeout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) cxgb4_insert_tid(t, child_ep, hwtid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) child_ep->com.local_addr.ss_family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) insert_ep_tid(child_ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) if (accept_cr(child_ep, skb, req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) c4iw_put_ep(&parent_ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) release_ep_resources(child_ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) if (iptype == 6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) (const u32 *)&sin6->sin6_addr.s6_addr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) c4iw_put_ep(&child_ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) reject:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) reject_cr(dev, hwtid, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) if (parent_ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) c4iw_put_ep(&parent_ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675)
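/*
 * Handle CPL_PASS_ESTABLISH: the passive connection has completed the
 * TCP handshake.  Record the initial sequence numbers and the peer's
 * window scale, then enter MPA_REQ_WAIT and send the flowc work
 * request.
 */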
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) struct c4iw_ep *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) struct cpl_pass_establish *req = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) unsigned int tid = GET_TID(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) u16 tcp_opt = ntohs(req->tcp_opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683)
	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;
	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) ep->snd_seq = be32_to_cpu(req->snd_isn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) ep->rcv_seq = be32_to_cpu(req->rcv_isn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) pr_debug("ep %p hwtid %u tcp_opt 0x%02x\n", ep, tid, tcp_opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) set_emss(ep, tcp_opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) dst_confirm(ep->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) mutex_lock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) ep->com.state = MPA_REQ_WAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) start_ep_timer(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) set_bit(PASS_ESTAB, &ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) ret = send_flowc(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) mutex_unlock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707)
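/*
 * Handle CPL_PEER_CLOSE: the peer sent a FIN.  Advance the endpoint
 * state machine; depending on the current state this may notify the
 * ULP, start or stop the endpoint timer, move the QP toward CLOSING
 * or IDLE, and finally release the endpoint once both halves of the
 * close have completed.
 */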
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) struct cpl_peer_close *hdr = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) struct c4iw_ep *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) struct c4iw_qp_attributes attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) int disconnect = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) int release = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) unsigned int tid = GET_TID(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) ep = get_ep_from_tid(dev, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) if (!ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) pr_debug("ep %p tid %u\n", ep, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) dst_confirm(ep->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) set_bit(PEER_CLOSE, &ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) mutex_lock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) switch (ep->com.state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) case MPA_REQ_WAIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) __state_set(&ep->com, CLOSING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) case MPA_REQ_SENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) __state_set(&ep->com, CLOSING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) connect_reply_upcall(ep, -ECONNRESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) case MPA_REQ_RCVD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) * We're gonna mark this puppy DEAD, but keep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) * the reference on it until the ULP accepts or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) * rejects the CR. Also wake up anyone waiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) * in rdma connection migration (see c4iw_accept_cr()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) __state_set(&ep->com, CLOSING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) case MPA_REP_SENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) __state_set(&ep->com, CLOSING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) case FPDU_MODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) start_ep_timer(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) __state_set(&ep->com, CLOSING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) attrs.next_state = C4IW_QP_STATE_CLOSING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) if (ret != -ECONNRESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) peer_close_upcall(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) disconnect = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) case ABORTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) disconnect = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) case CLOSING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) __state_set(&ep->com, MORIBUND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) disconnect = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) case MORIBUND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) (void)stop_ep_timer(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) if (ep->com.cm_id && ep->com.qp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) attrs.next_state = C4IW_QP_STATE_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) close_complete_upcall(ep, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) __state_set(&ep->com, DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) release = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) disconnect = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) case DEAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) disconnect = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) mutex_unlock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) if (disconnect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) if (release)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) release_ep_resources(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796)
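/*
 * Complete a deferred peer abort once the SRQ index has been read from
 * the hardware TCB: flush cached SRQ buffers, move the QP to ERROR,
 * signal the ULP, and release the endpoint.
 */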
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) static void finish_peer_abort(struct c4iw_dev *dev, struct c4iw_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) complete_cached_srq_buffers(ep, ep->srqe_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) if (ep->com.cm_id && ep->com.qp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) struct c4iw_qp_attributes attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) attrs.next_state = C4IW_QP_STATE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) peer_abort_upcall(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) release_ep_resources(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811)
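/*
 * Handle CPL_ABORT_REQ_RSS: the peer reset the connection, or the
 * hardware sent negative advice.  Negative advice is counted and
 * ignored.  Otherwise move any bound QP to ERROR, signal the ULP, and
 * acknowledge with a CPL_ABORT_RPL; for SRQ QPs this may be deferred
 * to finish_peer_abort() until the SRQ index is read from the TCB.
 * The endpoint is then released, or torn down and reconnected when
 * falling back from MPA v2 to MPA v1.
 */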
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) struct cpl_abort_req_rss6 *req = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) struct c4iw_ep *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) struct sk_buff *rpl_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) struct c4iw_qp_attributes attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) int release = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) unsigned int tid = GET_TID(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) u32 srqidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) ep = get_ep_from_tid(dev, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) if (!ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) status = ABORT_RSS_STATUS_G(be32_to_cpu(req->srqidx_status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) if (cxgb_is_neg_adv(status)) {
		pr_debug("Negative advice on abort - tid %u status %d (%s)\n",
			 ep->hwtid, status, neg_adv_str(status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) ep->stats.abort_neg_adv++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) mutex_lock(&dev->rdev.stats.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) dev->rdev.stats.neg_adv++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) mutex_unlock(&dev->rdev.stats.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) goto deref_ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) ep->com.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) set_bit(PEER_ABORT, &ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) * Wake up any threads in rdma_init() or rdma_fini().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) * However, this is not needed if com state is just
	 * MPA_REQ_SENT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) if (ep->com.state != MPA_REQ_SENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) mutex_lock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) switch (ep->com.state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) case CONNECTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) c4iw_put_ep(&ep->parent_ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) case MPA_REQ_WAIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) (void)stop_ep_timer(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) case MPA_REQ_SENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) (void)stop_ep_timer(ep);
		if (status != CPL_ERR_CONN_RESET || mpa_rev == 1 ||
		    (mpa_rev == 2 && ep->tried_with_mpa_v1)) {
			connect_reply_upcall(ep, -ECONNRESET);
		} else {
			/*
			 * Don't notify the upper layers: we want to retry
			 * with mpa_v1 without them even knowing it.
			 *
			 * Do some housekeeping so as to re-initiate the
			 * connection.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) pr_info("%s: mpa_rev=%d. Retrying with mpav1\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) __func__, mpa_rev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) ep->retry_with_mpa_v1 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) case MPA_REP_SENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) case MPA_REQ_RCVD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) case MORIBUND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) case CLOSING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) stop_ep_timer(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) case FPDU_MODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) if (ep->com.qp && ep->com.qp->srq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) srqidx = ABORT_RSS_SRQIDX_G(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) be32_to_cpu(req->srqidx_status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) if (srqidx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) complete_cached_srq_buffers(ep, srqidx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) /* Hold ep ref until finish_peer_abort() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) c4iw_get_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) __state_set(&ep->com, ABORTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) read_tcb(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) if (ep->com.cm_id && ep->com.qp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) attrs.next_state = C4IW_QP_STATE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) ret = c4iw_modify_qp(ep->com.qp->rhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) &attrs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) pr_err("%s - qp <- error failed!\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) peer_abort_upcall(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) case ABORTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) case DEAD:
		pr_warn("%s PEER_ABORT in DEAD state\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) mutex_unlock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) goto deref_ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) dst_confirm(ep->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) if (ep->com.state != ABORTING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) __state_set(&ep->com, DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) /* we don't release if we want to retry with mpa_v1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) if (!ep->retry_with_mpa_v1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) release = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) mutex_unlock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) rpl_skb = skb_dequeue(&ep->com.ep_skb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) if (WARN_ON(!rpl_skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) release = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) cxgb_mk_abort_rpl(rpl_skb, len, ep->hwtid, ep->txq_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) if (release)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) release_ep_resources(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) else if (ep->retry_with_mpa_v1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) if (ep->com.remote_addr.ss_family == AF_INET6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) struct sockaddr_in6 *sin6 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) (struct sockaddr_in6 *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) &ep->com.local_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) cxgb4_clip_release(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) ep->com.dev->rdev.lldi.ports[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) (const u32 *)&sin6->sin6_addr.s6_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) xa_erase_irq(&ep->com.dev->hwtids, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) ep->com.local_addr.ss_family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) dst_release(ep->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) cxgb4_l2t_release(ep->l2t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) c4iw_reconnect(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) deref_ep:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) /* Dereferencing ep, referenced in peer_abort_intr() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971)
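/*
 * Handle CPL_CLOSE_CON_RPL: our half-close has completed.  In MORIBUND
 * the close is now fully done, so idle the QP, signal the ULP, and
 * release the endpoint.
 */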
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) struct c4iw_ep *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) struct c4iw_qp_attributes attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) struct cpl_close_con_rpl *rpl = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) int release = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) unsigned int tid = GET_TID(rpl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) ep = get_ep_from_tid(dev, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) if (!ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) pr_debug("ep %p tid %u\n", ep, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) /* The cm_id may be null if we failed to connect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) mutex_lock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) set_bit(CLOSE_CON_RPL, &ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) switch (ep->com.state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) case CLOSING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) __state_set(&ep->com, MORIBUND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) case MORIBUND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) (void)stop_ep_timer(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) if ((ep->com.cm_id) && (ep->com.qp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) attrs.next_state = C4IW_QP_STATE_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) c4iw_modify_qp(ep->com.qp->rhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) ep->com.qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) C4IW_QP_ATTR_NEXT_STATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) &attrs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) close_complete_upcall(ep, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) __state_set(&ep->com, DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) release = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) case ABORTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) case DEAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) mutex_unlock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) if (release)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) release_ep_resources(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019)
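/*
 * Handle CPL_RDMA_TERMINATE: the peer sent an RDMA TERMINATE message.
 * Move the QP to TERMINATE and initiate an orderly close.
 */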
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) struct cpl_rdma_terminate *rpl = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) unsigned int tid = GET_TID(rpl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) struct c4iw_ep *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) struct c4iw_qp_attributes attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) ep = get_ep_from_tid(dev, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) if (ep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) if (ep->com.qp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) pr_warn("TERM received tid %u qpid %u\n", tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) ep->com.qp->wq.sq.qid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) attrs.next_state = C4IW_QP_STATE_TERMINATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) /* As per draft-hilland-iwarp-verbs-v1.0, sec 6.2.3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) * when entering the TERM state the RNIC MUST initiate a CLOSE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) c4iw_put_ep(&ep->com);
	} else {
		pr_warn("TERM received tid %u no ep/qp\n", tid);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply.  We can now free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) * the skb holding the mpa message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) struct c4iw_ep *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) struct cpl_fw4_ack *hdr = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) u8 credits = hdr->credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) unsigned int tid = GET_TID(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) ep = get_ep_from_tid(dev, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) if (!ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) pr_debug("ep %p tid %u credits %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) ep, ep->hwtid, credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) if (credits == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) pr_debug("0 credit ack ep %p tid %u state %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) ep, ep->hwtid, state_read(&ep->com));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) dst_confirm(ep->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) if (ep->mpa_skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) pr_debug("last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) ep, ep->hwtid, state_read(&ep->com),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) ep->mpa_attr.initiator ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) mutex_lock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) kfree_skb(ep->mpa_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) ep->mpa_skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) if (test_bit(STOP_MPA_TIMER, &ep->com.flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) stop_ep_timer(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) mutex_unlock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089)
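/*
 * iw_cm reject verb: refuse a connection request that is in
 * MPA_REQ_RCVD.  With MPA negotiation disabled (mpa_rev == 0) the
 * connection is simply aborted; otherwise an MPA reject message
 * carrying the private data is sent before disconnecting.
 */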
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) int abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) struct c4iw_ep *ep = to_ep(cm_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) pr_debug("ep %p tid %u\n", ep, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) mutex_lock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) if (ep->com.state != MPA_REQ_RCVD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) mutex_unlock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) return -ECONNRESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) set_bit(ULP_REJECT, &ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) if (mpa_rev == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) abort = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) abort = send_mpa_reject(ep, pdata, pdata_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) mutex_unlock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) stop_ep_timer(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115)
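/*
 * iw_cm accept verb: validate and negotiate the IRD/ORD values against
 * the peer's advertised limits, bind the QP to the endpoint, and move
 * the QP to RTS.
 */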
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) struct c4iw_qp_attributes attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) enum c4iw_qp_attr_mask mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) struct c4iw_ep *ep = to_ep(cm_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) int abort = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) pr_debug("ep %p tid %u\n", ep, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) mutex_lock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) if (ep->com.state != MPA_REQ_RCVD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) err = -ECONNRESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) if (!qp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) set_bit(ULP_ACCEPT, &ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) goto err_abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) if (conn_param->ord > ep->ird) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) if (RELAXED_IRD_NEGOTIATION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) conn_param->ord = ep->ird;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) ep->ird = conn_param->ird;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) ep->ord = conn_param->ord;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) send_mpa_reject(ep, conn_param->private_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) conn_param->private_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) goto err_abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) if (conn_param->ird < ep->ord) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) if (RELAXED_IRD_NEGOTIATION &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) ep->ord <= h->rdev.lldi.max_ordird_qp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) conn_param->ird = ep->ord;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) goto err_abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) ep->ird = conn_param->ird;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) ep->ord = conn_param->ord;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171)
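^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) * A zero IRD cannot satisfy an inbound RDMA read, so with peer2peer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) * enabled bump it to 1: unconditionally for MPA v1, and for MPA v2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) * only when the negotiated p2p type is a read request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) */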
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) if (ep->mpa_attr.version == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) if (peer2peer && ep->ird == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) ep->ird = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) if (peer2peer &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) ep->ird = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) pr_debug("ird %d ord %d\n", ep->ird, ep->ord);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) ep->com.cm_id = cm_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) ref_cm_id(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) ep->com.qp = qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) ref_qp(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) /* bind QP to EP and move to RTS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) attrs.mpa_attr = ep->mpa_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) attrs.max_ird = ep->ird;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) attrs.max_ord = ep->ord;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) attrs.llp_stream_handle = ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) attrs.next_state = C4IW_QP_STATE_RTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) /* bind QP and TID with INIT_WR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) mask = C4IW_QP_ATTR_NEXT_STATE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) C4IW_QP_ATTR_LLP_STREAM_HANDLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) C4IW_QP_ATTR_MPA_ATTR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) C4IW_QP_ATTR_MAX_IRD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) C4IW_QP_ATTR_MAX_ORD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) err = c4iw_modify_qp(ep->com.qp->rhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) ep->com.qp, mask, &attrs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) goto err_deref_cm_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) set_bit(STOP_MPA_TIMER, &ep->com.flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) err = send_mpa_reply(ep, conn_param->private_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) conn_param->private_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) goto err_deref_cm_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) __state_set(&ep->com, FPDU_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) established_upcall(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) mutex_unlock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) err_deref_cm_id:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) deref_cm_id(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) err_abort:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) abort = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) mutex_unlock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) if (abort)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230)
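^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) * For connects to a wildcard (INADDR_ANY) destination, borrow the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) * first primary IPv4 address configured on port 0 as both the local
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) * and the remote address of the loopback connection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) */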
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) struct in_device *ind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) int found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) const struct in_ifaddr *ifa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) ind = in_dev_get(dev->rdev.lldi.ports[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) if (!ind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) in_dev_for_each_ifa_rcu(ifa, ind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) if (ifa->ifa_flags & IFA_F_SECONDARY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) laddr->sin_addr.s_addr = ifa->ifa_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) raddr->sin_addr.s_addr = ifa->ifa_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) found = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) in_dev_put(ind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) return found ? 0 : -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256)
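^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) * Find a link-local IPv6 address on @dev whose flags do not match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) * @banned_flags (e.g. IFA_F_TENTATIVE) and copy it into @addr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) */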
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) unsigned char banned_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) struct inet6_dev *idev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) int err = -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) idev = __in6_dev_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) if (idev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) struct inet6_ifaddr *ifp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) read_lock_bh(&idev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) list_for_each_entry(ifp, &idev->addr_list, if_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) if (ifp->scope == IFA_LINK &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) !(ifp->flags & banned_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) memcpy(addr, &ifp->addr, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) read_unlock_bh(&idev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282)
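^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) * IPv6 counterpart of pick_local_ipaddrs(): use the port's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) * link-local address for both ends of a wildcard loopback connect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) */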
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) struct in6_addr addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) memcpy(la6->sin6_addr.s6_addr, &addr, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) memcpy(ra6->sin6_addr.s6_addr, &addr, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296)
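^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) * Active open: allocate an endpoint and an active TID, resolve the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) * route (picking loopback addresses for wildcard destinations),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) * import the L2T entry and send the TCP connect request to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) * hardware. Errors unwind in reverse order of acquisition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) */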
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) struct c4iw_ep *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) struct sockaddr_in *laddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) struct sockaddr_in *raddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) struct sockaddr_in6 *laddr6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) struct sockaddr_in6 *raddr6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) __u8 *ra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) int iptype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) if ((conn_param->ord > cur_max_read_depth(dev)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) (conn_param->ird > cur_max_read_depth(dev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) if (!ep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) pr_err("%s - cannot alloc ep\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) skb_queue_head_init(&ep->com.ep_skb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) goto fail1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) timer_setup(&ep->timer, ep_timeout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) ep->plen = conn_param->private_data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) if (ep->plen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) conn_param->private_data, ep->plen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) ep->ird = conn_param->ird;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) ep->ord = conn_param->ord;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) if (peer2peer && ep->ord == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) ep->ord = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) ep->com.cm_id = cm_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) ref_cm_id(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) cm_id->provider_data = ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) ep->com.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) ep->com.qp = get_qhp(dev, conn_param->qpn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) if (!ep->com.qp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) pr_warn("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) goto fail2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) ref_qp(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) pr_debug("qpn 0x%x qp %p cm_id %p\n", conn_param->qpn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) ep->com.qp, cm_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) * Allocate an active TID to initiate a TCP connection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) if (ep->atid == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) pr_err("%s - cannot alloc atid\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) goto fail2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) err = xa_insert_irq(&dev->atids, ep->atid, ep, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) goto fail5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) sizeof(ep->com.local_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) sizeof(ep->com.remote_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) laddr = (struct sockaddr_in *)&ep->com.local_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) raddr = (struct sockaddr_in *)&ep->com.remote_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) raddr6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) if (cm_id->m_remote_addr.ss_family == AF_INET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) iptype = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) ra = (__u8 *)&raddr->sin_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) * Handle loopback requests to INADDR_ANY.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) err = pick_local_ipaddrs(dev, cm_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) goto fail3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) /* find a route */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) pr_debug("saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) &laddr->sin_addr, ntohs(laddr->sin_port),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) ra, ntohs(raddr->sin_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) laddr->sin_addr.s_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) raddr->sin_addr.s_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) laddr->sin_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) raddr->sin_port, cm_id->tos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) iptype = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) ra = (__u8 *)&raddr6->sin6_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) * Handle loopback requests to INADDR_ANY.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) err = pick_local_ip6addrs(dev, cm_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) goto fail3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) /* find a route */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) pr_debug("saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) laddr6->sin6_addr.s6_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) ntohs(laddr6->sin6_port),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) laddr6->sin6_addr.s6_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) raddr6->sin6_addr.s6_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) laddr6->sin6_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) raddr6->sin6_port, cm_id->tos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) raddr6->sin6_scope_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) if (!ep->dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) pr_err("%s - cannot find route\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) err = -EHOSTUNREACH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) goto fail3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) pr_err("%s - cannot alloc l2e\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) goto fail4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) ep->l2t->idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) state_set(&ep->com, CONNECTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) ep->tos = cm_id->tos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) /* send connect request to rnic */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) err = send_connect(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446)
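^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) * send_connect() failed: unwind. Note the fail labels are not in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) * numeric order; fail3 falls through to fail5 so the atid's xarray
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) * entry is erased before the atid itself is freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) */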
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) cxgb4_l2t_release(ep->l2t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) fail4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) dst_release(ep->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) fail3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) xa_erase_irq(&ep->com.dev->atids, ep->atid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) fail5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) fail2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) skb_queue_purge(&ep->com.ep_skb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) deref_cm_id(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) fail1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462)
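^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) * IPv6 listeners must first install the address in the adapter's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) * CLIP table; the entry is released again if programming the server
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) * TID fails.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) */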
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) &ep->com.local_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) if (ipv6_addr_type(&sin6->sin6_addr) != IPV6_ADDR_ANY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) (const u32 *)&sin6->sin6_addr.s6_addr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) c4iw_init_wr_wait(ep->com.wr_waitp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) ep->stid, &sin6->sin6_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) sin6->sin6_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) ep->com.dev->rdev.lldi.rxq_ids[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) err = c4iw_wait_for_reply(&ep->com.dev->rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) ep->com.wr_waitp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 0, 0, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) else if (err > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) err = net_xmit_errno(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) (const u32 *)&sin6->sin6_addr.s6_addr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) err, ep->stid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495)
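^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) * Two flavours of IPv4 listener: with enable_fw_ofld_conn the server
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) * is a server filter, retried while the firmware reports -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) * otherwise it is a regular server TID whose completion arrives
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) * asynchronously through wr_waitp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) */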
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) struct sockaddr_in *sin = (struct sockaddr_in *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) &ep->com.local_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) if (dev->rdev.lldi.enable_fw_ofld_conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) err = cxgb4_create_server_filter(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) ep->com.dev->rdev.lldi.ports[0], ep->stid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) sin->sin_addr.s_addr, sin->sin_port, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) if (err == -EBUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) if (c4iw_fatal_error(&ep->com.dev->rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) set_current_state(TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) schedule_timeout(usecs_to_jiffies(100));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) } while (err == -EBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) c4iw_init_wr_wait(ep->com.wr_waitp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) ep->stid, sin->sin_addr.s_addr, sin->sin_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 0, ep->com.dev->rdev.lldi.rxq_ids[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) err = c4iw_wait_for_reply(&ep->com.dev->rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) ep->com.wr_waitp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 0, 0, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) else if (err > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) err = net_xmit_errno(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) err, ep->stid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) &sin->sin_addr, ntohs(sin->sin_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535)
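^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) * Create a listening endpoint: allocate a server TID (a server-filter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) * TID for IPv4 when firmware offloaded connections are enabled),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) * track it in the stid xarray, then program the hardware server for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) * the address family.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) */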
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) struct c4iw_listen_ep *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) if (!ep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) pr_err("%s - cannot alloc ep\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) goto fail1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) skb_queue_head_init(&ep->com.ep_skb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) pr_debug("ep %p\n", ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) ep->com.cm_id = cm_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) ref_cm_id(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) ep->com.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) ep->backlog = backlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) sizeof(ep->com.local_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) * Allocate a server TID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) if (dev->rdev.lldi.enable_fw_ofld_conn &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) ep->com.local_addr.ss_family == AF_INET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) cm_id->m_local_addr.ss_family, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) cm_id->m_local_addr.ss_family, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) if (ep->stid == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) pr_err("%s - cannot alloc stid\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) goto fail2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) err = xa_insert_irq(&dev->stids, ep->stid, ep, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) goto fail3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) state_set(&ep->com, LISTEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) if (ep->com.local_addr.ss_family == AF_INET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) err = create_server4(dev, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) err = create_server6(dev, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) cm_id->provider_data = ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) xa_erase_irq(&ep->com.dev->stids, ep->stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) fail3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) ep->com.local_addr.ss_family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) fail2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) deref_cm_id(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) fail1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599)
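^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) * Tear down a listener: remove the hardware server or server filter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) * release any IPv6 CLIP entry, and return the server TID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) */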
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) int c4iw_destroy_listen(struct iw_cm_id *cm_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) pr_debug("ep %p\n", ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) state_set(&ep->com, DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) ep->com.local_addr.ss_family == AF_INET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) err = cxgb4_remove_server_filter(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) ep->com.dev->rdev.lldi.ports[0], ep->stid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) ep->com.dev->rdev.lldi.rxq_ids[0], false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) struct sockaddr_in6 *sin6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) c4iw_init_wr_wait(ep->com.wr_waitp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) err = cxgb4_remove_server(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) ep->com.dev->rdev.lldi.ports[0], ep->stid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) ep->com.dev->rdev.lldi.rxq_ids[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) ep->com.local_addr.ss_family == AF_INET6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 0, 0, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) (const u32 *)&sin6->sin6_addr.s6_addr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) xa_erase_irq(&ep->com.dev->stids, ep->stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) ep->com.local_addr.ss_family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) deref_cm_id(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637)
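^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) * Start closing a connection. @abrupt requests an abortive (RST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) * close rather than a graceful half-close; the endpoint state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) * machine decides whether anything still needs to go on the wire.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) * If even the close message cannot be sent, the QP is moved to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) * ERROR and the endpoint's resources are released here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) */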
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) int close = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) int fatal = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) struct c4iw_rdev *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) mutex_lock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) pr_debug("ep %p state %s, abrupt %d\n", ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) states[ep->com.state], abrupt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) * Ref the ep here in case we have fatal errors causing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) * ep to be released and freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) c4iw_get_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) rdev = &ep->com.dev->rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) if (c4iw_fatal_error(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) fatal = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) close_complete_upcall(ep, -EIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) ep->com.state = DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) switch (ep->com.state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) case MPA_REQ_WAIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) case MPA_REQ_SENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) case MPA_REQ_RCVD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) case MPA_REP_SENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) case FPDU_MODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) case CONNECTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) close = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) if (abrupt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) ep->com.state = ABORTING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) ep->com.state = CLOSING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) * if we close before we see the fw4_ack() then we fix
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) * up the timer state since we're reusing it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) if (ep->mpa_skb &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) test_bit(STOP_MPA_TIMER, &ep->com.flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) clear_bit(STOP_MPA_TIMER, &ep->com.flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) stop_ep_timer(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) start_ep_timer(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) set_bit(CLOSE_SENT, &ep->com.flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) case CLOSING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) close = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) if (abrupt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) (void)stop_ep_timer(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) ep->com.state = ABORTING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) ep->com.state = MORIBUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) case MORIBUND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) case ABORTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) case DEAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) pr_debug("ignoring disconnect ep %p state %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) ep, ep->com.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) if (close) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) if (abrupt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) set_bit(EP_DISC_ABORT, &ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) ret = send_abort(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) set_bit(EP_DISC_CLOSE, &ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) ret = send_halfclose(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) set_bit(EP_DISC_FAIL, &ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) if (!abrupt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) stop_ep_timer(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) close_complete_upcall(ep, -EIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) if (ep->com.qp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) struct c4iw_qp_attributes attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) attrs.next_state = C4IW_QP_STATE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) ret = c4iw_modify_qp(ep->com.qp->rhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) ep->com.qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) C4IW_QP_ATTR_NEXT_STATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) &attrs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) pr_err("%s - qp <- error failed!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) fatal = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) mutex_unlock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) if (fatal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) release_ep_resources(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744)
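^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) * Firmware reply to an active ofld_connection_wr. FW_ENOMEM and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) * FW_EADDRINUSE are retried up to ACT_OPEN_RETRY_COUNT times;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) * anything else, or retry exhaustion, fails the connect and releases
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) * the atid, the route and the L2T entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) */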
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) struct c4iw_ep *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) int atid = be32_to_cpu(req->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) (__force u32) req->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) if (!ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) switch (req->retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) case FW_ENOMEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) set_bit(ACT_RETRY_NOMEM, &ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) send_fw_act_open_req(ep, atid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) case FW_EADDRINUSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) set_bit(ACT_RETRY_INUSE, &ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) send_fw_act_open_req(ep, atid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) pr_info("%s unexpected ofld conn wr retval %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) __func__, req->retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) pr_err("active ofld_connect_wr failure %d atid %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) req->retval, atid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) mutex_lock(&dev->rdev.stats.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) dev->rdev.stats.act_ofld_conn_fails++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) mutex_unlock(&dev->rdev.stats.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) connect_reply_upcall(ep, status2errno(req->retval));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) state_set(&ep->com, DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) if (ep->com.remote_addr.ss_family == AF_INET6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) struct sockaddr_in6 *sin6 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) (struct sockaddr_in6 *)&ep->com.local_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) (const u32 *)&sin6->sin6_addr.s6_addr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) xa_erase_irq(&dev->atids, atid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) cxgb4_free_atid(dev->rdev.lldi.tids, atid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) dst_release(ep->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) cxgb4_l2t_release(ep->l2t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795)
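^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) * Passive side of a deferred offload connection: on success, replay
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) * the saved SYN skb as a CPL_PASS_ACCEPT_REQ through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) * pass_accept_req(); on failure just account the error and drop it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) */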
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) struct sk_buff *rpl_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) struct cpl_pass_accept_req *cpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) if (req->retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) pr_err("%s passive open failure %d\n", __func__, req->retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) mutex_lock(&dev->rdev.stats.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) dev->rdev.stats.pas_ofld_conn_fails++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) mutex_unlock(&dev->rdev.stats.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) kfree_skb(rpl_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) (__force u32) htonl(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) (__force u32) req->tid)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) ret = pass_accept_req(dev, rpl_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) kfree_skb(rpl_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821)
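^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) * The GET_TCB reply carries the 128-byte TCB as sixteen big-endian
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) * 64-bit chunks with the highest-numbered 32-bit word first, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) * 32-bit word @word lives in chunk (31 - word) / 2. A 64-bit field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) * starting at @word therefore straddles two chunks: its low half is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) * the upper 32 bits of that chunk, its high half the lower 32 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) * of the preceding one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) */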
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) static inline u64 t4_tcb_get_field64(__be64 *tcb, u16 word)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) u64 tlo = be64_to_cpu(tcb[((31 - word) / 2)]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) u64 thi = be64_to_cpu(tcb[((31 - word) / 2) - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) u64 t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) u32 shift = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) t = (thi << shift) | (tlo >> shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) return t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) static inline u32 t4_tcb_get_field32(__be64 *tcb, u16 word, u32 mask, u32 shift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) u32 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) u64 t = be64_to_cpu(tcb[(31 - word) / 2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) if (word & 0x1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) shift += 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) v = (t >> shift) & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) return v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844)
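^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) * Reply to a TCB read issued during abort processing: extract
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) * rq_start as the SRQ entry index, re-reading the TCB while the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) * TF_RX_PDU_OUT flag is still set (bounded by a retry guard), then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) * resume whichever abort was waiting on this value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) */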
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) struct cpl_get_tcb_rpl *rpl = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) __be64 *tcb = (__be64 *)(rpl + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) unsigned int tid = GET_TID(rpl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) struct c4iw_ep *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) u64 t_flags_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) u32 rx_pdu_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) ep = get_ep_from_tid(dev, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) if (!ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) /* Examine the TF_RX_PDU_OUT (bit 49 of the t_flags) in order to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) * determine if there's an Rx PDU feedback event pending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) * If that bit is set, it means we'll need to re-read the TCB's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) * rq_start value. The final value is the one present in a TCB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) * with the TF_RX_PDU_OUT bit cleared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) t_flags_64 = t4_tcb_get_field64(tcb, TCB_T_FLAGS_W);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) rx_pdu_out = (t_flags_64 & TF_RX_PDU_OUT_V(1)) >> TF_RX_PDU_OUT_S;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) c4iw_put_ep(&ep->com); /* from get_ep_from_tid() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) c4iw_put_ep(&ep->com); /* from read_tcb() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) /* If TF_RX_PDU_OUT bit is set, re-read the TCB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) if (rx_pdu_out) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) if (++ep->rx_pdu_out_cnt >= 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) WARN_ONCE(1, "tcb re-read() reached the guard limit, finishing the cleanup\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) read_tcb(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) TCB_RQ_START_S);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) if (test_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) finish_peer_abort(dev, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) else if (test_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) send_abort_req(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) WARN_ONCE(1, "unexpected state!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895)
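^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) * FW6 messages deferred from the interrupt path: CQEs go to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) * event dispatcher, while ofld_connection_wr replies are routed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) * the TCP state they were issued from (active SYN_SENT vs. passive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) * SYN_RECV).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) */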
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) struct cpl_fw6_msg *rpl = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) struct cpl_fw6_msg_ofld_connection_wr_rpl *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) switch (rpl->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) case FW6_TYPE_CQE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) switch (req->t_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) case TCP_SYN_SENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) active_ofld_conn_reply(dev, skb, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) case TCP_SYN_RECV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) passive_ofld_conn_reply(dev, skb, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) pr_err("%s unexpected ofld conn wr state %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) __func__, req->t_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923)
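^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) * When a SYN arrives as a raw CPL_RX_PKT (fw_ofld_conn mode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) * reconstruct the CPL_PASS_ACCEPT_REQ the normal offload path would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) * have delivered: re-parse the TCP options from the packet and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) * repack the header-length fields in the layout expected by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) * current chip generation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) */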
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) __be32 l2info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) __be16 hdr_len, vlantag, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) u16 eth_hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) int tcp_hdr_len, ip_hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) u8 intf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) struct cpl_rx_pkt *cpl = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) struct cpl_pass_accept_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) struct tcp_options_received tmp_opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) struct c4iw_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) enum chip_type type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) /* Store values from cpl_rx_pkt in temporary location. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) vlantag = cpl->vlan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) len = cpl->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) l2info = cpl->l2info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) hdr_len = cpl->hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) intf = cpl->iff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) * We need to parse the TCP options from the SYN packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) * in order to generate the cpl_pass_accept_req.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) memset(&tmp_opt, 0, sizeof(tmp_opt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) tcp_clear_options(&tmp_opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) tcp_parse_options(&init_net, skb, &tmp_opt, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) req = __skb_push(skb, sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) memset(req, 0, sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) SYN_MAC_IDX_V(RX_MACIDX_G(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) be32_to_cpu(l2info))) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) SYN_XACT_MATCH_F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) type = dev->rdev.lldi.adapter_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) tcp_hdr_len = RX_TCPHDR_LEN_G(be16_to_cpu(hdr_len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) ip_hdr_len = RX_IPHDR_LEN_G(be16_to_cpu(hdr_len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) req->hdr_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(be32_to_cpu(l2info))));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) if (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) eth_hdr_len = is_t4(type) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) RX_ETHHDR_LEN_G(be32_to_cpu(l2info)) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) RX_T5_ETHHDR_LEN_G(be32_to_cpu(l2info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) req->hdr_len |= cpu_to_be32(TCP_HDR_LEN_V(tcp_hdr_len) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) IP_HDR_LEN_V(ip_hdr_len) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) ETH_HDR_LEN_V(eth_hdr_len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) } else { /* T6 and later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(l2info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) req->hdr_len |= cpu_to_be32(T6_TCP_HDR_LEN_V(tcp_hdr_len) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) T6_IP_HDR_LEN_V(ip_hdr_len) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) T6_ETH_HDR_LEN_V(eth_hdr_len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) req->vlan = vlantag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) req->len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) PASS_OPEN_TOS_V(tos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) req->tcpopt.mss = htons(tmp_opt.mss_clamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) if (tmp_opt.wscale_ok)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) req->tcpopt.wsf = tmp_opt.snd_wscale;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) req->tcpopt.tstamp = tmp_opt.saw_tstamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) if (tmp_opt.sack_ok)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) req->tcpopt.sack = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992)
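/*
 * Build and send an FW_OFLD_CONNECTION_WR asking the firmware to
 * instantiate a hardware TCB for this passively offloaded connection,
 * seeded with the addresses, ports, receive ISN and window taken from
 * the SYN.
 */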
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) __be32 laddr, __be16 lport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) __be32 raddr, __be16 rport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) u32 rcv_isn, u32 filter, u16 window,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) u32 rss_qid, u8 port_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) struct sk_buff *req_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) struct fw_ofld_connection_wr *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) struct cpl_pass_accept_req *cpl = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) if (!req_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) req = __skb_put_zero(req_skb, sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) req->le.filter = (__force __be32) filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) req->le.lport = lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) req->le.pport = rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) req->le.u.ipv4.lip = laddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) req->le.u.ipv4.pip = raddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) req->tcb.rcv_nxt = htonl(rcv_isn + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) req->tcb.rcv_adv = htons(window);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) req->tcb.t_state_to_astid =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) FW_OFLD_CONNECTION_WR_ASTID_V(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023)
/*
 * We store the RSS queue id in opt2; the firmware uses it to
 * send us the WR reply.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029)
/*
 * Initialize the MSS index in the TCB to 0xF so that the TCB
 * picks up the correct value when the driver sends the
 * cpl_pass_accept_rpl. If this were left at 0, TP would ignore
 * any MSS index > 0 set later.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
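
/*
 * Stash the original skb in the cookie; the firmware echoes it
 * back in the WR reply so processing can resume on this packet.
 */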
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) req->cookie = (uintptr_t)skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) kfree_skb(req_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048)
/*
 * Handler for CPL_RX_PKT messages. These arrive when a filter, rather
 * than a server entry, is used to redirect a SYN packet: packets that
 * hit the filter are steered to the offload queue, and the driver then
 * tries to establish the connection with a firmware work request.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) int stid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) unsigned int filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) struct ethhdr *eh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) struct vlan_ethhdr *vlan_eh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) struct iphdr *iph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) struct tcphdr *tcph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) struct rss_header *rss = (void *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) struct cpl_rx_pkt *cpl = (void *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) struct cpl_pass_accept_req *req = (void *)(rss + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) struct l2t_entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) struct dst_entry *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) struct c4iw_ep *lep = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) u16 window;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) struct port_info *pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) struct net_device *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) u16 rss_qid, eth_hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) int step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) struct neighbour *neigh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) /* Drop all non-SYN packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) goto reject;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) * Drop all packets which did not hit the filter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) * Unlikely to happen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) if (!(rss->filter_hit && rss->filter_tid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) goto reject;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087)
/*
 * Calculate the server TID from the filter hit index in the
 * cpl_rx_pkt; rss->hash_val carries it in big-endian form and
 * the __force casts convert it to host order.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) if (!lep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) pr_warn("%s connect request on invalid stid %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) __func__, stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) goto reject;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) switch (CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) case CHELSIO_T4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) eth_hdr_len = RX_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) case CHELSIO_T5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) eth_hdr_len = RX_T5_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) case CHELSIO_T6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) default:
pr_err("T%d chip is not supported\n",
CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) goto reject;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) if (eth_hdr_len == ETH_HLEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) eh = (struct ethhdr *)(req + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) iph = (struct iphdr *)(eh + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) vlan_eh = (struct vlan_ethhdr *)(req + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) iph = (struct iphdr *)(vlan_eh + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) if (iph->version != 0x4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) goto reject;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) tcph = (struct tcphdr *)(iph + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) skb_set_network_header(skb, (void *)iph - (void *)rss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) skb_set_transport_header(skb, (void *)tcph - (void *)rss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) skb_get(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) pr_debug("lip 0x%x lport %u pip 0x%x pport %u tos %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) ntohs(tcph->source), iph->tos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) iph->daddr, iph->saddr, tcph->dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) tcph->source, iph->tos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) if (!dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) pr_err("%s - failed to find dst entry!\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) goto reject;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) neigh = dst_neigh_lookup_skb(dst, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) if (!neigh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) pr_err("%s - failed to allocate neigh!\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) goto free_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150)
if (neigh->dev->flags & IFF_LOOPBACK) {
pdev = ip_dev_find(&init_net, iph->daddr);
if (!pdev) { /* defensive: ip_dev_find() can return NULL */
pr_err("%s - failed to find device!\n", __func__);
neigh_release(neigh);
goto free_dst;
}
e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, pdev, 0);
pi = (struct port_info *)netdev_priv(pdev);
dev_put(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) pdev = get_real_dev(neigh->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) pi = (struct port_info *)netdev_priv(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) neigh_release(neigh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) if (!e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) pr_err("%s - failed to allocate l2t entry!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) goto free_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
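/* Equivalent to ntohs(tcph->window); the __force casts placate sparse. */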
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) window = (__force u16) htons((__force u16)tcph->window);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173)
/* Calculate the filter portion for the LE region. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) dev->rdev.lldi.ports[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) e));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178)
/*
 * Synthesize the cpl_pass_accept_req. We have everything except the
 * TID. Once the firmware replies with a TID, we update the TID field
 * in the cpl and pass it through the regular cpl_pass_accept_req path.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) build_cpl_pass_accept_req(skb, stid, iph->tos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) tcph->source, ntohl(tcph->seq), filter, window,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) rss_qid, pi->port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) cxgb4_l2t_release(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) free_dst:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) dst_release(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) reject:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) if (lep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) c4iw_put_ep(&lep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) * These are the real handlers that are called from a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) * work queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) [CPL_ACT_ESTABLISH] = act_establish,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) [CPL_ACT_OPEN_RPL] = act_open_rpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) [CPL_RX_DATA] = rx_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) [CPL_ABORT_RPL_RSS] = abort_rpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) [CPL_ABORT_RPL] = abort_rpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) [CPL_PASS_OPEN_RPL] = pass_open_rpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) [CPL_PASS_ESTABLISH] = pass_establish,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) [CPL_PEER_CLOSE] = peer_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) [CPL_ABORT_REQ_RSS] = peer_abort,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) [CPL_CLOSE_CON_RPL] = close_con_rpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) [CPL_RDMA_TERMINATE] = terminate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) [CPL_FW4_ACK] = fw4_ack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) [CPL_GET_TCB_RPL] = read_tcb_rpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) [CPL_FW6_MSG] = deferred_fw6_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) [CPL_RX_PKT] = rx_pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) [FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) [FAKE_CPL_PASS_PUT_EP_SAFE] = _put_pass_ep_safe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222)
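/*
 * Handle an endpoint whose timer has fired: depending on the
 * connection state, report the timeout upcall and, where the
 * connection may still be live, abort it. The final c4iw_put_ep()
 * drops the reference that kept the ep alive for timeout processing.
 */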
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) static void process_timeout(struct c4iw_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) struct c4iw_qp_attributes attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) int abort = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) mutex_lock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) pr_debug("ep %p tid %u state %d\n", ep, ep->hwtid, ep->com.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) set_bit(TIMEDOUT, &ep->com.history);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) switch (ep->com.state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) case MPA_REQ_SENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) connect_reply_upcall(ep, -ETIMEDOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) case MPA_REQ_WAIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) case MPA_REQ_RCVD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) case MPA_REP_SENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) case FPDU_MODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) case CLOSING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) case MORIBUND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) if (ep->com.cm_id && ep->com.qp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) attrs.next_state = C4IW_QP_STATE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) c4iw_modify_qp(ep->com.qp->rhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) &attrs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) close_complete_upcall(ep, -ETIMEDOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) case ABORTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) case DEAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) * These states are expected if the ep timed out at the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) * time as another thread was calling stop_ep_timer().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) * So we silently do nothing for these states.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) abort = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) WARN(1, "%s unexpected state ep %p tid %u state %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) __func__, ep, ep->hwtid, ep->com.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) abort = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) mutex_unlock(&ep->com.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) if (abort)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270)
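/*
 * Drain the global timeout_list. process_timeout() can sleep (it takes
 * the ep mutex and may modify the QP), so each entry is unlinked under
 * timeout_lock and the lock is dropped around the call.
 */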
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) static void process_timedout_eps(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) struct c4iw_ep *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) spin_lock_irq(&timeout_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) while (!list_empty(&timeout_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) struct list_head *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) tmp = timeout_list.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) list_del(tmp);
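/*
 * Poison the list linkage so ep_timeout()'s !ep->entry.next
 * check sees this ep as off-list again.
 */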
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) tmp->next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) tmp->prev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) spin_unlock_irq(&timeout_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) ep = list_entry(tmp, struct c4iw_ep, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) process_timeout(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) spin_lock_irq(&timeout_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) spin_unlock_irq(&timeout_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) static void process_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) struct sk_buff *skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) struct c4iw_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) struct cpl_act_establish *rpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) unsigned int opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) process_timedout_eps();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) while ((skb = skb_dequeue(&rxq))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) rpl = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) opcode = rpl->ot.opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) if (opcode >= ARRAY_SIZE(work_handlers) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) !work_handlers[opcode]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) pr_err("No handler for opcode 0x%x.\n", opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) ret = work_handlers[opcode](dev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) process_timedout_eps();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) static DECLARE_WORK(skb_work, process_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319)
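/*
 * Timer expiry callback. This runs in timer (softirq) context and must
 * not sleep, so it only queues the ep on the timeout list and kicks
 * the workqueue.
 */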
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) static void ep_timeout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) struct c4iw_ep *ep = from_timer(ep, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) int kickit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) spin_lock(&timeout_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) * Only insert if it is not already on the list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) if (!ep->entry.next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) list_add_tail(&ep->entry, &timeout_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) kickit = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) spin_unlock(&timeout_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) if (kickit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) queue_work(workq, &skb_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339)
/*
 * All CM events are handled on a work queue so that they run in a
 * safe, sleepable context.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) {
/*
 * Save dev in the skb->cb area, one pointer-width in; the first
 * pointer-sized slot of cb is left for other users.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) * Queue the skb and schedule the worker thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) skb_queue_tail(&rxq, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) queue_work(workq, &skb_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358)
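/*
 * Nothing waits on SET_TCB_RPL completions here; just log an
 * unexpected status and free the skb.
 */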
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) if (rpl->status != CPL_ERR_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) pr_err("Unexpected SET_TCB_RPL status %u for tid %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) rpl->status, GET_TID(rpl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370)
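/*
 * Demux FW6 messages: WR completions wake their waiter immediately,
 * while CQE and offload-connection-WR replies are deferred to the
 * workqueue via sched().
 */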
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) struct cpl_fw6_msg *rpl = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) struct c4iw_wr_wait *wr_waitp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) pr_debug("type %u\n", rpl->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) switch (rpl->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) case FW6_TYPE_WR_RPL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) pr_debug("wr_waitp %p ret %u\n", wr_waitp, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) if (wr_waitp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) c4iw_wake_up_deref(wr_waitp, ret ? -ret : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) case FW6_TYPE_CQE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) sched(dev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) pr_err("%s unexpected fw6 msg type %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) __func__, rpl->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400)
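/*
 * Called directly from the upcall path so that any thread blocked on
 * this ep's wr_waitp is woken immediately with -ECONNRESET; the full
 * abort processing is then deferred to peer_abort() via sched().
 */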
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) struct cpl_abort_req_rss *req = cplhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) struct c4iw_ep *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) unsigned int tid = GET_TID(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) ep = get_ep_from_tid(dev, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) /* This EP will be dereferenced in peer_abort() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) if (!ep) {
pr_warn("Abort on non-existent endpoint, tid %u\n", tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) if (cxgb_is_neg_adv(req->status)) {
pr_debug("Negative advice on abort - tid %u status %d (%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) ep->hwtid, req->status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) neg_adv_str(req->status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid, ep->com.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) sched(dev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) * Most upcalls from the T4 Core go to sched() to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) * schedule the processing on a work queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) [CPL_ACT_ESTABLISH] = sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) [CPL_ACT_OPEN_RPL] = sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) [CPL_RX_DATA] = sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) [CPL_ABORT_RPL_RSS] = sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) [CPL_ABORT_RPL] = sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) [CPL_PASS_OPEN_RPL] = sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) [CPL_CLOSE_LISTSRV_RPL] = sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) [CPL_PASS_ACCEPT_REQ] = sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) [CPL_PASS_ESTABLISH] = sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) [CPL_PEER_CLOSE] = sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) [CPL_CLOSE_CON_RPL] = sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) [CPL_ABORT_REQ_RSS] = peer_abort_intr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) [CPL_RDMA_TERMINATE] = sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) [CPL_FW4_ACK] = sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) [CPL_SET_TCB_RPL] = set_tcb_rpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) [CPL_GET_TCB_RPL] = sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) [CPL_FW6_MSG] = fw6_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) [CPL_RX_PKT] = sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452)
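/*
 * Module-level CM setup: initialize the shared rx queue and the
 * ordered workqueue (WQ_MEM_RECLAIM so it can make progress under
 * memory pressure) that runs all deferred CM work.
 */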
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) int __init c4iw_cm_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) spin_lock_init(&timeout_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) skb_queue_head_init(&rxq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) workq = alloc_ordered_workqueue("iw_cxgb4", WQ_MEM_RECLAIM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) if (!workq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464)
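/*
 * Module teardown: no endpoint should still be queued for timeout
 * processing. destroy_workqueue() drains pending work itself; the
 * explicit flush_workqueue() is belt and braces.
 */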
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) void c4iw_cm_term(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) WARN_ON(!list_empty(&timeout_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) flush_workqueue(workq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) destroy_workqueue(workq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) }