/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 */

#ifndef __CXGBIT_H__
#define __CXGBIT_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/inet.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>

#include <asm/byteorder.h>

#include <net/net_namespace.h>

#include <target/iscsi/iscsi_transport.h>
#include <iscsi_target_parameters.h>
#include <iscsi_target_login.h>

#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "l2t.h"
#include "libcxgb_ppm.h"
#include "cxgbit_lro.h"

extern struct mutex cdev_list_lock;
extern struct list_head cdev_list_head;
struct cxgbit_np;

struct cxgbit_sock;

struct cxgbit_cmd {
	struct scatterlist sg;
	struct cxgbi_task_tag_info ttinfo;
	bool setup_ddp;
	bool release;
};

#define CXGBIT_MAX_ISO_PAYLOAD	\
	min_t(u32, MAX_SKB_FRAGS * PAGE_SIZE, 65535)
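
/*
 * Illustrative note (not from the original header): CXGBIT_MAX_ISO_PAYLOAD
 * caps an ISO burst at whichever is smaller, the largest payload one skb can
 * carry in page fragments (MAX_SKB_FRAGS * PAGE_SIZE) or 65535 bytes. On a
 * system with 4 KiB pages and the common MAX_SKB_FRAGS value of 17, the
 * fragment limit works out to 69632 bytes, so the 65535-byte cap applies.
 */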

struct cxgbit_iso_info {
	u8 flags;
	u32 mpdu;
	u32 len;
	u32 burst_len;
};

enum cxgbit_skcb_flags {
	SKCBF_TX_NEED_HDR	= (1 << 0), /* packet needs a header */
	SKCBF_TX_FLAG_COMPL	= (1 << 1), /* wr completion flag */
	SKCBF_TX_ISO		= (1 << 2), /* iso cpl in tx skb */
	SKCBF_RX_LRO		= (1 << 3), /* lro skb */
};

struct cxgbit_skb_rx_cb {
	u8 opcode;
	void *pdu_cb;
	void (*backlog_fn)(struct cxgbit_sock *, struct sk_buff *);
};

struct cxgbit_skb_tx_cb {
	u8 submode;
	u32 extra_len;
};

union cxgbit_skb_cb {
	struct {
		u8 flags;
		union {
			struct cxgbit_skb_tx_cb tx;
			struct cxgbit_skb_rx_cb rx;
		};
	};

	struct {
		/* This member must be first. */
		struct l2t_skb_cb l2t;
		struct sk_buff *wr_next;
	};
};

#define CXGBIT_SKB_CB(skb)	((union cxgbit_skb_cb *)&((skb)->cb[0]))
#define cxgbit_skcb_flags(skb)		(CXGBIT_SKB_CB(skb)->flags)
#define cxgbit_skcb_submode(skb)	(CXGBIT_SKB_CB(skb)->tx.submode)
#define cxgbit_skcb_tx_wr_next(skb)	(CXGBIT_SKB_CB(skb)->wr_next)
#define cxgbit_skcb_tx_extralen(skb)	(CXGBIT_SKB_CB(skb)->tx.extra_len)
#define cxgbit_skcb_rx_opcode(skb)	(CXGBIT_SKB_CB(skb)->rx.opcode)
#define cxgbit_skcb_rx_backlog_fn(skb)	(CXGBIT_SKB_CB(skb)->rx.backlog_fn)
#define cxgbit_rx_pdu_cb(skb)		(CXGBIT_SKB_CB(skb)->rx.pdu_cb)
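
/*
 * Usage sketch (illustrative only, not part of the original header): a TX
 * skb's private control block would typically be initialised through the
 * accessors above, for example:
 *
 *	cxgbit_skcb_flags(skb) = SKCBF_TX_NEED_HDR;
 *	cxgbit_skcb_submode(skb) = csk->submode;
 *	cxgbit_skcb_tx_extralen(skb) = 0;
 *
 * Because cxgbit_skb_tx_cb and cxgbit_skb_rx_cb share a union, a given skb
 * must be treated as either a TX or an RX buffer, never both.
 */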

static inline void *cplhdr(struct sk_buff *skb)
{
	return skb->data;
}
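
/*
 * Illustrative use (an assumption, not taken from this file): CPL message
 * handlers read the hardware header straight from skb->data, e.g.:
 *
 *	struct cpl_rx_data *cpl = cplhdr(skb);
 *	unsigned int tid = GET_TID(cpl);
 *
 * GET_TID() and the CPL message layouts come from t4_msg.h, included above.
 */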

enum cxgbit_cdev_flags {
	CDEV_STATE_UP = 0,
	CDEV_ISO_ENABLE,
	CDEV_DDP_ENABLE,
};
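
/*
 * Sketch (illustrative): the values above are bit numbers used with the
 * kernel bitops on cxgbit_device.flags (declared below), e.g.:
 *
 *	if (test_bit(CDEV_DDP_ENABLE, &cdev->flags))
 *		... set up DDP for this connection ...
 */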

#define NP_INFO_HASH_SIZE 32

struct np_info {
	struct np_info *next;
	struct cxgbit_np *cnp;
	unsigned int stid;
};

struct cxgbit_list_head {
	struct list_head list;
	/* device lock */
	spinlock_t lock;
};

struct cxgbit_device {
	struct list_head list;
	struct cxgb4_lld_info lldi;
	struct np_info *np_hash_tab[NP_INFO_HASH_SIZE];
	/* np lock */
	spinlock_t np_lock;
	u8 selectq[MAX_NPORTS][2];
	struct cxgbit_list_head cskq;
	u32 mdsl;
	struct kref kref;
	unsigned long flags;
};

struct cxgbit_wr_wait {
	struct completion completion;
	int ret;
};

enum cxgbit_csk_state {
	CSK_STATE_IDLE = 0,
	CSK_STATE_LISTEN,
	CSK_STATE_CONNECTING,
	CSK_STATE_ESTABLISHED,
	CSK_STATE_ABORTING,
	CSK_STATE_CLOSING,
	CSK_STATE_MORIBUND,
	CSK_STATE_DEAD,
};

enum cxgbit_csk_flags {
	CSK_TX_DATA_SENT = 0,
	CSK_LOGIN_PDU_DONE,
	CSK_LOGIN_DONE,
	CSK_DDP_ENABLE,
	CSK_ABORT_RPL_WAIT,
};
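
/*
 * As with the cdev flags, these are bit numbers operated on with the kernel
 * bitops against cxgbit_sock_common.flags. A hypothetical check (sketch
 * only) might read:
 *
 *	if (!test_bit(CSK_LOGIN_DONE, &csk->com.flags))
 *		return -EAGAIN;
 */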

struct cxgbit_sock_common {
	struct cxgbit_device *cdev;
	struct sockaddr_storage local_addr;
	struct sockaddr_storage remote_addr;
	struct cxgbit_wr_wait wr_wait;
	enum cxgbit_csk_state state;
	unsigned long flags;
};

struct cxgbit_np {
	struct cxgbit_sock_common com;
	wait_queue_head_t accept_wait;
	struct iscsi_np *np;
	struct completion accept_comp;
	struct list_head np_accept_list;
	/* np accept lock */
	spinlock_t np_accept_lock;
	struct kref kref;
	unsigned int stid;
};

struct cxgbit_sock {
	struct cxgbit_sock_common com;
	struct cxgbit_np *cnp;
	struct iscsi_conn *conn;
	struct l2t_entry *l2t;
	struct dst_entry *dst;
	struct list_head list;
	struct sk_buff_head rxq;
	struct sk_buff_head txq;
	struct sk_buff_head ppodq;
	struct sk_buff_head backlogq;
	struct sk_buff_head skbq;
	struct sk_buff *wr_pending_head;
	struct sk_buff *wr_pending_tail;
	struct sk_buff *skb;
	struct sk_buff *lro_skb;
	struct sk_buff *lro_hskb;
	struct list_head accept_node;
	/* socket lock */
	spinlock_t lock;
	wait_queue_head_t waitq;
	bool lock_owner;
	struct kref kref;
	u32 max_iso_npdu;
	u32 wr_cred;
	u32 wr_una_cred;
	u32 wr_max_cred;
	u32 snd_una;
	u32 tid;
	u32 snd_nxt;
	u32 rcv_nxt;
	u32 smac_idx;
	u32 tx_chan;
	u32 mtu;
	u32 write_seq;
	u32 rx_credits;
	u32 snd_win;
	u32 rcv_win;
	u16 mss;
	u16 emss;
	u16 plen;
	u16 rss_qid;
	u16 txq_idx;
	u16 ctrlq_idx;
	u8 tos;
	u8 port_id;
#define CXGBIT_SUBMODE_HCRC 0x1
#define CXGBIT_SUBMODE_DCRC 0x2
	u8 submode;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 dcb_priority;
#endif
	u8 snd_wscale;
};
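
/*
 * Illustrative note: submode is a bitmask built from the two CRC offload
 * bits defined inside the struct above, so negotiating both header and data
 * digests could be expressed as, for example:
 *
 *	csk->submode = CXGBIT_SUBMODE_HCRC | CXGBIT_SUBMODE_DCRC;
 */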

void _cxgbit_free_cdev(struct kref *kref);
void _cxgbit_free_csk(struct kref *kref);
void _cxgbit_free_cnp(struct kref *kref);

static inline void cxgbit_get_cdev(struct cxgbit_device *cdev)
{
	kref_get(&cdev->kref);
}

static inline void cxgbit_put_cdev(struct cxgbit_device *cdev)
{
	kref_put(&cdev->kref, _cxgbit_free_cdev);
}

static inline void cxgbit_get_csk(struct cxgbit_sock *csk)
{
	kref_get(&csk->kref);
}

static inline void cxgbit_put_csk(struct cxgbit_sock *csk)
{
	kref_put(&csk->kref, _cxgbit_free_csk);
}

static inline void cxgbit_get_cnp(struct cxgbit_np *cnp)
{
	kref_get(&cnp->kref);
}

static inline void cxgbit_put_cnp(struct cxgbit_np *cnp)
{
	kref_put(&cnp->kref, _cxgbit_free_cnp);
}
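
/*
 * Usage sketch (assumption): the get/put helpers above follow the usual
 * kref pattern, so any path that hands a pointer to deferred work takes a
 * reference first and drops it when that work is done, e.g.:
 *
 *	cxgbit_get_csk(csk);
 *	queue_work(wq, &work);		(hypothetical deferred user of csk)
 *	...
 *	cxgbit_put_csk(csk);		(in the work handler, once finished)
 *
 * The final put invokes the matching _cxgbit_free_*() release function.
 */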

static inline void cxgbit_sock_reset_wr_list(struct cxgbit_sock *csk)
{
	csk->wr_pending_tail = NULL;
	csk->wr_pending_head = NULL;
}

static inline struct sk_buff *cxgbit_sock_peek_wr(const struct cxgbit_sock *csk)
{
	return csk->wr_pending_head;
}

static inline void
cxgbit_sock_enqueue_wr(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	cxgbit_skcb_tx_wr_next(skb) = NULL;

	skb_get(skb);

	if (!csk->wr_pending_head)
		csk->wr_pending_head = skb;
	else
		cxgbit_skcb_tx_wr_next(csk->wr_pending_tail) = skb;
	csk->wr_pending_tail = skb;
}

static inline struct sk_buff *cxgbit_sock_dequeue_wr(struct cxgbit_sock *csk)
{
	struct sk_buff *skb = csk->wr_pending_head;

	if (likely(skb)) {
		csk->wr_pending_head = cxgbit_skcb_tx_wr_next(skb);
		cxgbit_skcb_tx_wr_next(skb) = NULL;
	}
	return skb;
}
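
/*
 * Because cxgbit_sock_enqueue_wr() takes an extra reference with skb_get(),
 * whoever dequeues a pending WR owns that reference and must release it.
 * A teardown loop would therefore look roughly like (illustrative sketch):
 *
 *	while ((skb = cxgbit_sock_dequeue_wr(csk)))
 *		kfree_skb(skb);
 */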

typedef void (*cxgbit_cplhandler_func)(struct cxgbit_device *,
				       struct sk_buff *);

int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *);
int cxgbit_setup_conn_digest(struct cxgbit_sock *);
int cxgbit_accept_np(struct iscsi_np *, struct iscsi_conn *);
void cxgbit_free_np(struct iscsi_np *);
void cxgbit_abort_conn(struct cxgbit_sock *csk);
void cxgbit_free_conn(struct iscsi_conn *);
extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];
int cxgbit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
int cxgbit_rx_data_ack(struct cxgbit_sock *);
int cxgbit_l2t_send(struct cxgbit_device *, struct sk_buff *,
		    struct l2t_entry *);
void cxgbit_push_tx_frames(struct cxgbit_sock *);
int cxgbit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
int cxgbit_xmit_pdu(struct iscsi_conn *, struct iscsi_cmd *,
		    struct iscsi_datain_req *, const void *, u32);
void cxgbit_get_r2t_ttt(struct iscsi_conn *, struct iscsi_cmd *,
			struct iscsi_r2t *);
u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *);
int cxgbit_ofld_send(struct cxgbit_device *, struct sk_buff *);
void cxgbit_get_rx_pdu(struct iscsi_conn *);
int cxgbit_validate_params(struct iscsi_conn *);
struct cxgbit_device *cxgbit_find_device(struct net_device *, u8 *);

/* DDP */
int cxgbit_ddp_init(struct cxgbit_device *);
int cxgbit_setup_conn_pgidx(struct cxgbit_sock *, u32);
int cxgbit_reserve_ttt(struct cxgbit_sock *, struct iscsi_cmd *);
void cxgbit_unmap_cmd(struct iscsi_conn *, struct iscsi_cmd *);

static inline
struct cxgbi_ppm *cdev2ppm(struct cxgbit_device *cdev)
{
	return (struct cxgbi_ppm *)(*cdev->lldi.iscsi_ppm);
}
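
/*
 * Note (assumption based on the cxgb4/libcxgb split): lldi.iscsi_ppm is a
 * pointer published by the cxgb4 core driver, and cdev2ppm() simply
 * dereferences it to reach the per-adapter page-pod manager used when
 * setting up DDP task tags.
 */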
#endif /* __CXGBIT_H__ */