/*
 * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver.
 *
 * Copyright (c) 2010-2015 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 * Written by: Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/if_vlan.h>
#include <linux/inet.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <linux/inetdevice.h>	/* ip_dev_find */
#include <linux/module.h>
#include <net/tcp.h>

static unsigned int dbg_level;

#include "libcxgbi.h"

#define DRV_MODULE_NAME		"libcxgbi"
#define DRV_MODULE_DESC		"Chelsio iSCSI driver library"
#define DRV_MODULE_VERSION	"0.9.1-ko"
#define DRV_MODULE_RELDATE	"Apr. 2015"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "libcxgbi debug level (default=0)");


/*
 * cxgbi device management
 * maintains a list of the cxgbi devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);

static LIST_HEAD(cdev_rcu_list);
static DEFINE_SPINLOCK(cdev_rcu_lock);

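/*
 * Unpack the driver's software tag: the low 15 bits hold the age and
 * bits 16-30 hold the task index.
 */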
static inline void cxgbi_decode_sw_tag(u32 sw_tag, int *idx, int *age)
{
	if (age)
		*age = sw_tag & 0x7FFF;
	if (idx)
		*idx = (sw_tag >> 16) & 0x7FFF;
}

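/*
 * Allocate the source-port map used to hand each offloaded connection a
 * unique TCP source port in the range [base, base + max_conn).
 */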
int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
				unsigned int max_conn)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;

	pmap->port_csk = kvzalloc(array_size(max_conn,
					     sizeof(struct cxgbi_sock *)),
				  GFP_KERNEL | __GFP_NOWARN);
	if (!pmap->port_csk) {
		pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn);
		return -ENOMEM;
	}

	pmap->max_connect = max_conn;
	pmap->sport_base = base;
	spin_lock_init(&pmap->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create);

void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	struct cxgbi_sock *csk;
	int i;

	for (i = 0; i < pmap->max_connect; i++) {
		if (pmap->port_csk[i]) {
			csk = pmap->port_csk[i];
			pmap->port_csk[i] = NULL;
			log_debug(1 << CXGBI_DBG_SOCK,
				  "csk 0x%p, cdev 0x%p, offload down.\n",
				  csk, cdev);
			spin_lock_bh(&csk->lock);
			cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN);
			cxgbi_sock_closed(csk);
			spin_unlock_bh(&csk->lock);
			cxgbi_sock_put(csk);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup);

static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		  "cdev 0x%p, p# %u.\n", cdev, cdev->nports);
	cxgbi_hbas_remove(cdev);
	cxgbi_device_portmap_cleanup(cdev);
	if (cdev->cdev2ppm)
		cxgbi_ppm_release(cdev->cdev2ppm(cdev));
	if (cdev->pmap.max_connect)
		kvfree(cdev->pmap.port_csk);
	kfree(cdev);
}

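/*
 * A cxgbi_device is allocated as one block: the structure itself, followed by
 * the per-port net_device pointers, the per-port hba pointers, and then
 * "extra" bytes of LLD private data (dd_data).
 */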
struct cxgbi_device *cxgbi_device_register(unsigned int extra,
					   unsigned int nports)
{
	struct cxgbi_device *cdev;

	cdev = kzalloc(sizeof(*cdev) + extra + nports *
			(sizeof(struct cxgbi_hba *) +
			 sizeof(struct net_device *)),
			GFP_KERNEL);
	if (!cdev) {
		pr_warn("nport %d, OOM.\n", nports);
		return NULL;
	}
	cdev->ports = (struct net_device **)(cdev + 1);
	cdev->hbas = (struct cxgbi_hba **)(((char *)cdev->ports) + nports *
			sizeof(struct net_device *));
	if (extra)
		cdev->dd_data = ((char *)cdev->hbas) +
				nports * sizeof(struct cxgbi_hba *);
	spin_lock_init(&cdev->pmap.lock);

	mutex_lock(&cdev_mutex);
	list_add_tail(&cdev->list_head, &cdev_list);
	mutex_unlock(&cdev_mutex);

	spin_lock(&cdev_rcu_lock);
	list_add_tail_rcu(&cdev->rcu_node, &cdev_rcu_list);
	spin_unlock(&cdev_rcu_lock);

	log_debug(1 << CXGBI_DBG_DEV,
		  "cdev 0x%p, p# %u.\n", cdev, nports);
	return cdev;
}
EXPORT_SYMBOL_GPL(cxgbi_device_register);

void cxgbi_device_unregister(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		  "cdev 0x%p, p# %u,%s.\n",
		  cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : "");

	mutex_lock(&cdev_mutex);
	list_del(&cdev->list_head);
	mutex_unlock(&cdev_mutex);

	spin_lock(&cdev_rcu_lock);
	list_del_rcu(&cdev->rcu_node);
	spin_unlock(&cdev_rcu_lock);
	synchronize_rcu();

	cxgbi_device_destroy(cdev);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister);

void cxgbi_device_unregister_all(unsigned int flag)
{
	struct cxgbi_device *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		if ((cdev->flags & flag) == flag) {
			mutex_unlock(&cdev_mutex);
			cxgbi_device_unregister(cdev);
			mutex_lock(&cdev_mutex);
		}
	}
	mutex_unlock(&cdev_mutex);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all);

struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev)
{
	struct cxgbi_device *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		if (cdev->lldev == lldev) {
			mutex_unlock(&cdev_mutex);
			return cdev;
		}
	}
	mutex_unlock(&cdev_mutex);

	log_debug(1 << CXGBI_DBG_DEV,
		  "lldev 0x%p, NO match found.\n", lldev);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev);

struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
						 int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev, *tmp;
	int i;

	if (is_vlan_dev(ndev)) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		log_debug(1 << CXGBI_DBG_DEV,
			  "vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		for (i = 0; i < cdev->nports; i++) {
			if (ndev == cdev->ports[i]) {
				cdev->hbas[i]->vdev = vdev;
				mutex_unlock(&cdev_mutex);
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	mutex_unlock(&cdev_mutex);
	log_debug(1 << CXGBI_DBG_DEV,
		  "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev);

struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev,
						     int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev;
	int i;

	if (is_vlan_dev(ndev)) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(cdev, &cdev_rcu_list, rcu_node) {
		for (i = 0; i < cdev->nports; i++) {
			if (ndev == cdev->ports[i]) {
				cdev->hbas[i]->vdev = vdev;
				rcu_read_unlock();
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	rcu_read_unlock();

	log_debug(1 << CXGBI_DBG_DEV,
		  "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu);

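/*
 * Fall back to matching a port by MAC address when the net_device itself is
 * not one of the cdev's ports.
 */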
static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
						     int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev, *tmp;
	int i;

	if (is_vlan_dev(ndev)) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		for (i = 0; i < cdev->nports; i++) {
			if (!memcmp(ndev->dev_addr, cdev->ports[i]->dev_addr,
				    MAX_ADDR_LEN)) {
				cdev->hbas[i]->vdev = vdev;
				mutex_unlock(&cdev_mutex);
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	mutex_unlock(&cdev_mutex);
	log_debug(1 << CXGBI_DBG_DEV,
		  "ndev 0x%p, %s, NO match mac found.\n",
		  ndev, ndev->name);
	return NULL;
}

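/* Remove and free the iSCSI host registered for each port of the device. */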
void cxgbi_hbas_remove(struct cxgbi_device *cdev)
{
	int i;
	struct cxgbi_hba *chba;

	log_debug(1 << CXGBI_DBG_DEV,
		  "cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		chba = cdev->hbas[i];
		if (chba) {
			cdev->hbas[i] = NULL;
			iscsi_host_remove(chba->shost);
			pci_dev_put(cdev->pdev);
			iscsi_host_free(chba->shost);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_remove);

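/*
 * Allocate, set up and register one iSCSI Scsi_Host (hba) per port of the
 * device. On any failure, all hosts added so far are torn down again.
 */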
int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun,
		   unsigned int max_conns, struct scsi_host_template *sht,
		   struct scsi_transport_template *stt)
{
	struct cxgbi_hba *chba;
	struct Scsi_Host *shost;
	int i, err;

	log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		shost = iscsi_host_alloc(sht, sizeof(*chba), 1);
		if (!shost) {
			pr_info("0x%p, p%d, %s, host alloc failed.\n",
				cdev, i, cdev->ports[i]->name);
			err = -ENOMEM;
			goto err_out;
		}

		shost->transportt = stt;
		shost->max_lun = max_lun;
		shost->max_id = max_conns - 1;
		shost->max_channel = 0;
		shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

		chba = iscsi_host_priv(shost);
		chba->cdev = cdev;
		chba->ndev = cdev->ports[i];
		chba->shost = shost;

		shost->can_queue = sht->can_queue - ISCSI_MGMT_CMDS_MAX;

		log_debug(1 << CXGBI_DBG_DEV,
			  "cdev 0x%p, p#%d %s: chba 0x%p.\n",
			  cdev, i, cdev->ports[i]->name, chba);

		pci_dev_get(cdev->pdev);
		err = iscsi_host_add(shost, &cdev->pdev->dev);
		if (err) {
			pr_info("cdev 0x%p, p#%d %s, host add failed.\n",
				cdev, i, cdev->ports[i]->name);
			pci_dev_put(cdev->pdev);
			scsi_host_put(shost);
			goto err_out;
		}

		cdev->hbas[i] = chba;
	}

	return 0;

err_out:
	cxgbi_hbas_remove(cdev);
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_add);

/*
 * iSCSI offload
 *
 * - source port management
 *   To find a free source port in the port allocation map we use a very simple
 *   rotor scheme to look for the next free port.
 *
 *   If a source port has been specified make sure that it doesn't collide with
 *   our normal source port allocation map. If it's outside the range of our
 *   allocation/deallocation scheme just let them use it.
 *
 *   If the source port is outside our allocation range, the caller is
 *   responsible for keeping track of their port usage.
 */

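/*
 * Return the first connection in the port map that belongs to the given
 * physical port (port_id), or NULL if the port has none.
 */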
static struct cxgbi_sock *find_sock_on_port(struct cxgbi_device *cdev,
					    unsigned char port_id)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	unsigned int i;
	unsigned int used;

	if (!pmap->max_connect || !pmap->used)
		return NULL;

	spin_lock_bh(&pmap->lock);
	used = pmap->used;
	for (i = 0; used && i < pmap->max_connect; i++) {
		struct cxgbi_sock *csk = pmap->port_csk[i];

		if (csk) {
			if (csk->port_id == port_id) {
				spin_unlock_bh(&pmap->lock);
				return csk;
			}
			used--;
		}
	}
	spin_unlock_bh(&pmap->lock);

	return NULL;
}

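/*
 * Allocate a TCP source port for an offloaded connection: starting at
 * pmap->next, scan the map for a free slot, claim it, and record
 * sport_base + idx as the connection's source port.
 */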
static int sock_get_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	unsigned int start;
	int idx;
	__be16 *port;

	if (!pmap->max_connect) {
		pr_err("cdev 0x%p, p#%u %s, NO port map.\n",
		       cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	if (csk->csk_family == AF_INET)
		port = &csk->saddr.sin_port;
	else /* ipv6 */
		port = &csk->saddr6.sin6_port;

	if (*port) {
		pr_err("source port NON-ZERO %u.\n",
		       ntohs(*port));
		return -EADDRINUSE;
	}

	spin_lock_bh(&pmap->lock);
	if (pmap->used >= pmap->max_connect) {
		spin_unlock_bh(&pmap->lock);
		pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	start = idx = pmap->next;
	do {
		if (++idx >= pmap->max_connect)
			idx = 0;
		if (!pmap->port_csk[idx]) {
			pmap->used++;
			*port = htons(pmap->sport_base + idx);
			pmap->next = idx;
			pmap->port_csk[idx] = csk;
			spin_unlock_bh(&pmap->lock);
			cxgbi_sock_get(csk);
			log_debug(1 << CXGBI_DBG_SOCK,
				  "cdev 0x%p, p#%u %s, p %u, %u.\n",
				  cdev, csk->port_id,
				  cdev->ports[csk->port_id]->name,
				  pmap->sport_base + idx, pmap->next);
			return 0;
		}
	} while (idx != start);
	spin_unlock_bh(&pmap->lock);

	/* should not happen */
	pr_warn("cdev 0x%p, p#%u %s, next %u?\n",
		cdev, csk->port_id, cdev->ports[csk->port_id]->name,
		pmap->next);
	return -EADDRNOTAVAIL;
}

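/*
 * Release the connection's source port back to the port map and drop the
 * reference taken by sock_get_port().
 */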
static void sock_put_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	__be16 *port;

	if (csk->csk_family == AF_INET)
		port = &csk->saddr.sin_port;
	else /* ipv6 */
		port = &csk->saddr6.sin6_port;

	if (*port) {
		int idx = ntohs(*port) - pmap->sport_base;

		*port = 0;
		if (idx < 0 || idx >= pmap->max_connect) {
			pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n",
			       cdev, csk->port_id,
			       cdev->ports[csk->port_id]->name,
			       pmap->sport_base + idx);
			return;
		}

		spin_lock_bh(&pmap->lock);
		pmap->port_csk[idx] = NULL;
		pmap->used--;
		spin_unlock_bh(&pmap->lock);

		log_debug(1 << CXGBI_DBG_SOCK,
			  "cdev 0x%p, p#%u %s, release %u.\n",
			  cdev, csk->port_id, cdev->ports[csk->port_id]->name,
			  pmap->sport_base + idx);

		cxgbi_sock_put(csk);
	}
}

/*
 * iscsi tcp connection
 */
void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk)
{
	if (csk->cpl_close) {
		kfree_skb(csk->cpl_close);
		csk->cpl_close = NULL;
	}
	if (csk->cpl_abort_req) {
		kfree_skb(csk->cpl_abort_req);
		csk->cpl_abort_req = NULL;
	}
	if (csk->cpl_abort_rpl) {
		kfree_skb(csk->cpl_abort_rpl);
		csk->cpl_abort_rpl = NULL;
	}
}
EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs);

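/*
 * Allocate a new offload connection (cxgbi_sock) on this device and have the
 * low-level driver pre-allocate the CPL skbs (close/abort) needed to tear the
 * connection down later.
 */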
static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
{
	struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO);

	if (!csk) {
		pr_info("alloc csk %zu failed.\n", sizeof(*csk));
		return NULL;
	}

	if (cdev->csk_alloc_cpls(csk) < 0) {
		pr_info("csk 0x%p, alloc cpls failed.\n", csk);
		kfree(csk);
		return NULL;
	}

	spin_lock_init(&csk->lock);
	kref_init(&csk->refcnt);
	skb_queue_head_init(&csk->receive_queue);
	skb_queue_head_init(&csk->write_queue);
	timer_setup(&csk->retry_timer, NULL, 0);
	init_completion(&csk->cmpl);
	rwlock_init(&csk->callback_lock);
	csk->cdev = cdev;
	csk->flags = 0;
	cxgbi_sock_set_state(csk, CTP_CLOSED);

	log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk);

	return csk;
}

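/* Resolve an IPv4 route for the given TCP 4-tuple; NULL if no route exists. */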
static struct rtable *find_route_ipv4(struct flowi4 *fl4,
				      __be32 saddr, __be32 daddr,
				      __be16 sport, __be16 dport, u8 tos,
				      int ifindex)
{
	struct rtable *rt;

	rt = ip_route_output_ports(&init_net, fl4, NULL, daddr, saddr,
				   dport, sport, IPPROTO_TCP, tos, ifindex);
	if (IS_ERR(rt))
		return NULL;

	return rt;
}

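/*
 * Resolve the route to an IPv4 target, check that the egress interface
 * belongs to a cxgbi device, and allocate a cxgbi_sock bound to that port
 * with the destination and source addresses filled in.
 */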
static struct cxgbi_sock *
cxgbi_check_route(struct sockaddr *dst_addr, int ifindex)
{
	struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr;
	struct dst_entry *dst;
	struct net_device *ndev;
	struct cxgbi_device *cdev;
	struct rtable *rt = NULL;
	struct neighbour *n;
	struct flowi4 fl4;
	struct cxgbi_sock *csk = NULL;
	unsigned int mtu = 0;
	int port = 0xFFFF;
	int err = 0;

	rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0,
			     daddr->sin_port, 0, ifindex);
	if (!rt) {
		pr_info("no route to ipv4 0x%x, port %u.\n",
			be32_to_cpu(daddr->sin_addr.s_addr),
			be16_to_cpu(daddr->sin_port));
		err = -ENETUNREACH;
		goto err_out;
	}
	dst = &rt->dst;
	n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr);
	if (!n) {
		err = -ENODEV;
		goto rel_rt;
	}
	ndev = n->dev;

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		pr_info("multi-cast route %pI4, port %u, dev %s.\n",
			&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
			ndev->name);
		err = -ENETUNREACH;
		goto rel_neigh;
	}

	if (ndev->flags & IFF_LOOPBACK) {
		ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
		if (!ndev) {
			err = -ENETUNREACH;
			goto rel_neigh;
		}
		mtu = ndev->mtu;
		pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
			n->dev->name, ndev->name, mtu);
	}

	if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) {
		pr_info("%s interface not up.\n", ndev->name);
		err = -ENETDOWN;
		goto rel_neigh;
	}

	cdev = cxgbi_device_find_by_netdev(ndev, &port);
	if (!cdev)
		cdev = cxgbi_device_find_by_mac(ndev, &port);
	if (!cdev) {
		pr_info("dst %pI4, %s, NOT cxgbi device.\n",
			&daddr->sin_addr.s_addr, ndev->name);
		err = -ENETUNREACH;
		goto rel_neigh;
	}
	log_debug(1 << CXGBI_DBG_SOCK,
		  "route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
		  &daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
		  port, ndev->name, cdev);

	csk = cxgbi_sock_create(cdev);
	if (!csk) {
		err = -ENOMEM;
		goto rel_neigh;
	}
	csk->cdev = cdev;
	csk->port_id = port;
	csk->mtu = mtu;
	csk->dst = dst;

	csk->csk_family = AF_INET;
	csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
	csk->daddr.sin_port = daddr->sin_port;
	csk->daddr.sin_family = daddr->sin_family;
	csk->saddr.sin_family = daddr->sin_family;
	csk->saddr.sin_addr.s_addr = fl4.saddr;
	neigh_release(n);

	return csk;

rel_neigh:
	neigh_release(n);

rel_rt:
	ip_rt_put(rt);
err_out:
	return ERR_PTR(err);
}

#if IS_ENABLED(CONFIG_IPV6)
static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
					const struct in6_addr *daddr,
					int ifindex)
{
	struct flowi6 fl;

	memset(&fl, 0, sizeof(fl));
	fl.flowi6_oif = ifindex;
	if (saddr)
		memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
	if (daddr)
		memcpy(&fl.daddr, daddr, sizeof(struct in6_addr));
	return (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
}

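/*
 * IPv6 counterpart of cxgbi_check_route(): resolve the route, validate the
 * egress device, pick a preferred source address and set up the cxgbi_sock.
 */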
static struct cxgbi_sock *
cxgbi_check_route6(struct sockaddr *dst_addr, int ifindex)
{
	struct sockaddr_in6 *daddr6 = (struct sockaddr_in6 *)dst_addr;
	struct dst_entry *dst;
	struct net_device *ndev;
	struct cxgbi_device *cdev;
	struct rt6_info *rt = NULL;
	struct neighbour *n;
	struct in6_addr pref_saddr;
	struct cxgbi_sock *csk = NULL;
	unsigned int mtu = 0;
	int port = 0xFFFF;
	int err = 0;

	rt = find_route_ipv6(NULL, &daddr6->sin6_addr, ifindex);

	if (!rt) {
		pr_info("no route to ipv6 %pI6 port %u\n",
			daddr6->sin6_addr.s6_addr,
			be16_to_cpu(daddr6->sin6_port));
		err = -ENETUNREACH;
		goto err_out;
	}

	dst = &rt->dst;

	n = dst_neigh_lookup(dst, &daddr6->sin6_addr);

	if (!n) {
		pr_info("%pI6, port %u, dst no neighbour.\n",
			daddr6->sin6_addr.s6_addr,
			be16_to_cpu(daddr6->sin6_port));
		err = -ENETUNREACH;
		goto rel_rt;
	}
	ndev = n->dev;

	if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) {
		pr_info("%s interface not up.\n", ndev->name);
		err = -ENETDOWN;
		goto rel_rt;
	}

	if (ipv6_addr_is_multicast(&daddr6->sin6_addr)) {
		pr_info("multi-cast route %pI6 port %u, dev %s.\n",
			daddr6->sin6_addr.s6_addr,
			ntohs(daddr6->sin6_port), ndev->name);
		err = -ENETUNREACH;
		goto rel_rt;
	}

	cdev = cxgbi_device_find_by_netdev(ndev, &port);
	if (!cdev)
		cdev = cxgbi_device_find_by_mac(ndev, &port);
	if (!cdev) {
		pr_info("dst %pI6 %s, NOT cxgbi device.\n",
			daddr6->sin6_addr.s6_addr, ndev->name);
		err = -ENETUNREACH;
		goto rel_rt;
	}
	log_debug(1 << CXGBI_DBG_SOCK,
		  "route to %pI6 :%u, ndev p#%d,%s, cdev 0x%p.\n",
		  daddr6->sin6_addr.s6_addr, ntohs(daddr6->sin6_port), port,
		  ndev->name, cdev);

	csk = cxgbi_sock_create(cdev);
	if (!csk) {
		err = -ENOMEM;
		goto rel_rt;
	}
	csk->cdev = cdev;
	csk->port_id = port;
	csk->mtu = mtu;
	csk->dst = dst;

	rt6_get_prefsrc(rt, &pref_saddr);
	if (ipv6_addr_any(&pref_saddr)) {
		struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt);

		err = ipv6_dev_get_saddr(&init_net, idev ? idev->dev : NULL,
					 &daddr6->sin6_addr, 0, &pref_saddr);
		if (err) {
			pr_info("failed to get source address to reach %pI6\n",
				&daddr6->sin6_addr);
			goto rel_rt;
		}
	}

	csk->csk_family = AF_INET6;
	csk->daddr6.sin6_addr = daddr6->sin6_addr;
	csk->daddr6.sin6_port = daddr6->sin6_port;
	csk->daddr6.sin6_family = daddr6->sin6_family;
	csk->saddr6.sin6_family = daddr6->sin6_family;
	csk->saddr6.sin6_addr = pref_saddr;

	neigh_release(n);
	return csk;

rel_rt:
	if (n)
		neigh_release(n);

	ip6_rt_put(rt);
	if (csk)
		cxgbi_sock_closed(csk);
err_out:
	return ERR_PTR(err);
}
#endif /* IS_ENABLED(CONFIG_IPV6) */

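/*
 * Record the initial send sequence number once the TCP connection is
 * established and move the csk to the CTP_ESTABLISHED state.
 */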
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) unsigned int opt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) dst_confirm(csk->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) cxgbi_sock_set_state(csk, CTP_ESTABLISHED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) EXPORT_SYMBOL_GPL(cxgbi_sock_established);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
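/*
 * Report a TCP connection close to libiscsi once the socket has left
 * CTP_ESTABLISHED; csk->user_data, when set, is the owning iscsi_conn.
 */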
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) log_debug(1 << CXGBI_DBG_SOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) "csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) csk, csk->state, csk->flags, csk->user_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (csk->state != CTP_ESTABLISHED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) read_lock_bh(&csk->callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (csk->user_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) iscsi_conn_failure(csk->user_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) ISCSI_ERR_TCP_CONN_CLOSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) read_unlock_bh(&csk->callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
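/*
 * Final teardown of an offloaded connection: release the local port and
 * cached route if held, let the hw driver free its per-connection offload
 * resources, mark the socket CTP_CLOSED, notify libiscsi and drop the
 * state machine's reference on the socket.
 */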
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) void cxgbi_sock_closed(struct cxgbi_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) csk, (csk)->state, (csk)->flags, (csk)->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (csk->saddr.sin_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) sock_put_port(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (csk->dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) dst_release(csk->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) csk->cdev->csk_release_offload_resources(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) cxgbi_sock_set_state(csk, CTP_CLOSED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) cxgbi_inform_iscsi_conn_closing(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) cxgbi_sock_put(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) EXPORT_SYMBOL_GPL(cxgbi_sock_closed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
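/*
 * Kick off an active close.  Anything still sitting on the receive queue
 * is dropped; if data was lost, or no logout response has been received,
 * the connection is aborted rather than closed gracefully.
 */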
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) static void need_active_close(struct cxgbi_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) int data_lost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) int close_req = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) csk, (csk)->state, (csk)->flags, (csk)->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) spin_lock_bh(&csk->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (csk->dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) dst_confirm(csk->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) data_lost = skb_queue_len(&csk->receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) __skb_queue_purge(&csk->receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (csk->state == CTP_ACTIVE_OPEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) else if (csk->state == CTP_ESTABLISHED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) close_req = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) } else if (csk->state == CTP_PASSIVE_CLOSE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) close_req = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (close_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if (!cxgbi_sock_flag(csk, CTPF_LOGOUT_RSP_RCVD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) data_lost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) csk->cdev->csk_send_abort_req(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) csk->cdev->csk_send_close_req(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) spin_unlock_bh(&csk->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) csk, csk->state, csk->flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) &csk->saddr.sin_addr.s_addr, csk->saddr.sin_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) &csk->daddr.sin_addr.s_addr, csk->daddr.sin_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) errno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) cxgbi_sock_set_state(csk, CTP_CONNECTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) csk->err = errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) cxgbi_sock_closed(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
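/*
 * ARP resolution failed for an active-open request: fail the connect with
 * -EHOSTUNREACH, free the request skb and drop the reference held on the
 * hw driver module.
 */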
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) struct module *owner = csk->cdev->owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) csk, (csk)->state, (csk)->flags, (csk)->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) cxgbi_sock_get(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) spin_lock_bh(&csk->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (csk->state == CTP_ACTIVE_OPEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) spin_unlock_bh(&csk->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) cxgbi_sock_put(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) __kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) module_put(owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
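/*
 * An ABORT_RPL has arrived from the hardware.  If we were waiting for it
 * (CTPF_ABORT_RPL_PENDING), the abort handshake is complete and the
 * connection can be torn down.
 */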
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) cxgbi_sock_get(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) spin_lock_bh(&csk->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) csk, csk->state, csk->flags, csk->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) cxgbi_sock_closed(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) spin_unlock_bh(&csk->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) cxgbi_sock_put(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
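/*
 * The peer has closed its half of the connection: advance the close state
 * machine (or finish the close entirely) and let libiscsi know the
 * connection is going away.
 */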
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) csk, (csk)->state, (csk)->flags, (csk)->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) cxgbi_sock_get(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) spin_lock_bh(&csk->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) switch (csk->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) case CTP_ESTABLISHED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) case CTP_ACTIVE_CLOSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) case CTP_CLOSE_WAIT_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) cxgbi_sock_closed(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) case CTP_ABORTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) csk, csk->state, csk->flags, csk->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) cxgbi_inform_iscsi_conn_closing(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) spin_unlock_bh(&csk->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) cxgbi_sock_put(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
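/*
 * The hardware has acknowledged our close request: update snd_una from the
 * reported snd_nxt and advance the close state machine.
 */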
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) csk, (csk)->state, (csk)->flags, (csk)->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) cxgbi_sock_get(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) spin_lock_bh(&csk->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) csk->snd_una = snd_nxt - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) switch (csk->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) case CTP_ACTIVE_CLOSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) case CTP_CLOSE_WAIT_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) case CTP_CLOSE_WAIT_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) cxgbi_sock_closed(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) case CTP_ABORTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) csk, csk->state, csk->flags, csk->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) spin_unlock_bh(&csk->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) cxgbi_sock_put(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
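/*
 * Work-request credits returned by the hardware: replenish wr_cred, free
 * the skbs of completed work requests (each skb's csum field holds its
 * credit count), update snd_una when a sequence number is supplied, then
 * push pending tx and tell libiscsi the transmit path is open again.
 */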
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) unsigned int snd_una, int seq_chk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) "csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) csk, csk->state, csk->flags, csk->tid, credits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) spin_lock_bh(&csk->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) csk->wr_cred += credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) while (credits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) struct sk_buff *p = cxgbi_sock_peek_wr(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (unlikely(!p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) csk, csk->state, csk->flags, csk->tid, credits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) csk->wr_cred, csk->wr_una_cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if (unlikely(credits < p->csum)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) csk, csk->state, csk->flags, csk->tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) credits, csk->wr_cred, csk->wr_una_cred,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) p->csum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) p->csum -= credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) cxgbi_sock_dequeue_wr(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) credits -= p->csum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) kfree_skb(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) cxgbi_sock_check_wr_invariants(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) if (seq_chk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (unlikely(before(snd_una, csk->snd_una))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) csk, csk->state, csk->flags, csk->tid, snd_una,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) csk->snd_una);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (csk->snd_una != snd_una) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) csk->snd_una = snd_una;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) dst_confirm(csk->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (skb_queue_len(&csk->write_queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (csk->cdev->csk_push_tx_frames(csk, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) cxgbi_conn_tx_open(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) cxgbi_conn_tx_open(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) spin_unlock_bh(&csk->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
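/*
 * MSS selection helpers: pick the largest entry of the adapter's MTU table
 * that does not exceed the given MTU, clamping the advertised MSS between
 * the path MTU and the smallest table entry (the 40 bytes account for the
 * IP and TCP headers).
 */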
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) unsigned short mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) ++i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) unsigned int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) struct dst_entry *dst = csk->dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) csk->advmss = dst_metric_advmss(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (csk->advmss > pmtu - 40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) csk->advmss = pmtu - 40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if (csk->advmss < csk->cdev->mtus[0] - 40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) csk->advmss = csk->cdev->mtus[0] - 40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) return idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) cxgbi_skcb_tcp_seq(skb) = csk->write_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) __skb_queue_tail(&csk->write_queue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) int pending = cxgbi_sock_count_pending_wrs(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (unlikely(csk->wr_cred + pending != csk->wr_max_cred))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) scmd_get_params(struct scsi_cmnd *sc, struct scatterlist **sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) unsigned int *sgcnt, unsigned int *dlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) unsigned int prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) struct scsi_data_buffer *sdb = prot ? scsi_prot(sc) : &sc->sdb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) *sgl = sdb->table.sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) *sgcnt = sdb->table.nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) *dlen = sdb->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) /* Caution: for protection sdb, sdb->length is invalid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
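/*
 * Fill one page pod: copy the prebuilt pod header and up to PPOD_PAGES_MAX
 * page DMA addresses from the scatterlist (unused slots are zeroed).
 * *sg_pp/*sg_off are saved before the fifth address is written, so that
 * the same page is repeated as the first entry of the next pod.
 */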
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) void cxgbi_ddp_set_one_ppod(struct cxgbi_pagepod *ppod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) struct cxgbi_task_tag_info *ttinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) struct scatterlist **sg_pp, unsigned int *sg_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) struct scatterlist *sg = sg_pp ? *sg_pp : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) unsigned int offset = sg_off ? *sg_off : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) dma_addr_t addr = 0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) unsigned int len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) if (sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) addr = sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) len = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) for (i = 0; i < PPOD_PAGES_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) ppod->addr[i] = cpu_to_be64(addr + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) offset += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) if (offset == (len + sg->offset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) sg = sg_next(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) addr = sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) len = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) ppod->addr[i] = 0ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * the fifth address needs to be repeated in the next ppod, so do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * not move sg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (sg_pp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) *sg_pp = sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) *sg_off = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (offset == len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) sg = sg_next(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) addr = sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) len = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) EXPORT_SYMBOL_GPL(cxgbi_ddp_set_one_ppod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * APIs interacting with open-iscsi libraries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) int cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) struct cxgbi_tag_format *tformat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) unsigned int iscsi_size, unsigned int llimit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) unsigned int start, unsigned int rsvd_factor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) unsigned int edram_start, unsigned int edram_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) int err = cxgbi_ppm_init(ppm_pp, cdev->ports[0], cdev->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) cdev->lldev, tformat, iscsi_size, llimit, start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) rsvd_factor, edram_start, edram_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (err >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (ppm->ppmax < 1024 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) cdev->flags |= CXGBI_FLAG_DDP_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) cdev->flags |= CXGBI_FLAG_DDP_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) EXPORT_SYMBOL_GPL(cxgbi_ddp_ppm_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
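/*
 * A scatterlist is usable for DDP only if offsets are 4-byte aligned,
 * every entry after the first starts at offset 0, and every entry except
 * the last ends exactly on a page boundary.
 */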
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) static int cxgbi_ddp_sgl_check(struct scatterlist *sgl, int nents)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) int last_sgidx = nents - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) struct scatterlist *sg = sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) for (i = 0; i < nents; i++, sg = sg_next(sg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) unsigned int len = sg->length + sg->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if ((sg->offset & 0x3) || (i && sg->offset) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) ((i != last_sgidx) && len != PAGE_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) log_debug(1 << CXGBI_DBG_DDP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) "sg %u/%u, %u,%u, not aligned.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) i, nents, sg->offset, sg->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
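/*
 * Try to set up direct data placement for a data-in transfer: reserve
 * enough page pods for the buffer, DMA-map the scatterlist, build the pod
 * header carrying the DDP tag, and either program the pods through the
 * control queue now or defer them to the PDU transmit path, depending on
 * CXGBI_FLAG_USE_PPOD_OFLDQ.
 */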
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) static int cxgbi_ddp_reserve(struct cxgbi_conn *cconn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) struct cxgbi_task_data *tdata, u32 sw_tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) unsigned int xferlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) struct cxgbi_sock *csk = cconn->cep->csk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) struct cxgbi_device *cdev = csk->cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) struct scatterlist *sgl = ttinfo->sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) unsigned int sgcnt = ttinfo->nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) unsigned int sg_offset = sgl->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) if (cdev->flags & CXGBI_FLAG_DDP_OFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) log_debug(1 << CXGBI_DBG_DDP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) "cdev 0x%p DDP off.\n", cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) if (!ppm || xferlen < DDP_THRESHOLD || !sgcnt ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) log_debug(1 << CXGBI_DBG_DDP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) "ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) ppm, ppm ? ppm->tformat.pgsz_idx_dflt : DDP_PGIDX_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) xferlen, ttinfo->nents);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) /* make sure the buffer is suitable for ddp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if (cxgbi_ddp_sgl_check(sgl, sgcnt) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) ttinfo->nr_pages = (xferlen + sgl->offset + (1 << PAGE_SHIFT) - 1) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) * the ddp tag will be used for the itt in the outgoing pdu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * the itt generated by libiscsi is saved in the ppm and can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) * retrieved via the ddp tag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) err = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) &ttinfo->tag, (unsigned long)sw_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) cconn->ddp_full++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) ttinfo->npods = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) /* set up DMA from the scsi command sgl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) sgl->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) err = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) sgl->offset = sg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if (err == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) __func__, sw_tag, xferlen, sgcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) goto rel_ppods;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (err != ttinfo->nr_pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) log_debug(1 << CXGBI_DBG_DDP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) "%s: sw tag 0x%x, xfer %u, sgl %u, dma count %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) __func__, sw_tag, xferlen, sgcnt, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_MAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) ttinfo->cid = csk->port_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) xferlen, &ttinfo->hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) if (cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) /* write ppod from xmit_pdu (of iscsi_scsi_command pdu) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) /* write ppod from control queue now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) err = cdev->csk_ddp_set_map(ppm, csk, ttinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) goto rel_ppods;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) rel_ppods:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_MAPPED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_MAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) struct scsi_cmnd *sc = task->sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) struct cxgbi_conn *cconn = tcp_conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) struct cxgbi_device *cdev = cconn->chba->cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) u32 tag = ntohl((__force u32)hdr_itt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) log_debug(1 << CXGBI_DBG_DDP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) "cdev 0x%p, task 0x%p, release tag 0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) cdev, task, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) if (sc && sc->sc_data_direction == DMA_FROM_DEVICE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) cxgbi_ppm_is_ddp_tag(ppm, tag)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) if (!(cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) cdev->csk_ddp_clear_map(cdev, ppm, ttinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) static inline u32 cxgbi_build_sw_tag(u32 idx, u32 age)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) /* assume idx and age both are < 0x7FFF (32767) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) return (idx << 16) | age;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
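/*
 * Choose the itt for an outgoing command: for a read, try to reserve a DDP
 * tag so the target's data can be placed directly; otherwise fall back to
 * encoding the plain sw tag (task->itt + session age) as a non-DDP tag.
 */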
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) struct scsi_cmnd *sc = task->sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) struct iscsi_conn *conn = task->conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) struct iscsi_session *sess = conn->session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) struct cxgbi_conn *cconn = tcp_conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) struct cxgbi_device *cdev = cconn->chba->cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) u32 sw_tag = cxgbi_build_sw_tag(task->itt, sess->age);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) u32 tag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (sc && sc->sc_data_direction == DMA_FROM_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) scmd_get_params(sc, &ttinfo->sgl, &ttinfo->nents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) &tdata->dlen, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) err = cxgbi_ddp_reserve(cconn, tdata, sw_tag, tdata->dlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) tag = ttinfo->tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) log_debug(1 << CXGBI_DBG_DDP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) cconn->cep->csk, task, tdata->dlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) ttinfo->nents);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) err = cxgbi_ppm_make_non_ddp_tag(ppm, sw_tag, &tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) /* the itt needs to be sent in big-endian order */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) *hdr_itt = (__force itt_t)htonl(tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) log_debug(1 << CXGBI_DBG_DDP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) struct cxgbi_conn *cconn = tcp_conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) struct cxgbi_device *cdev = cconn->chba->cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) u32 tag = ntohl((__force u32)itt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) u32 sw_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (ppm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) if (cxgbi_ppm_is_ddp_tag(ppm, tag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) sw_bits = cxgbi_ppm_get_tag_caller_data(ppm, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) sw_bits = cxgbi_ppm_decode_non_ddp_tag(ppm, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) sw_bits = tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) cxgbi_decode_sw_tag(sw_bits, idx, age);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) log_debug(1 << CXGBI_DBG_DDP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) age ? *age : 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) void cxgbi_conn_tx_open(struct cxgbi_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) struct iscsi_conn *conn = csk->user_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if (conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) log_debug(1 << CXGBI_DBG_SOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) "csk 0x%p, cid %d.\n", csk, conn->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) iscsi_conn_queue_work(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) * pdu receive, interact with libiscsi_tcp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) static inline int read_pdu_skb(struct iscsi_conn *conn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) unsigned int offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) int offloaded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) int bytes_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) case ISCSI_TCP_CONN_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) skb, offset, offloaded);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) case ISCSI_TCP_SUSPENDED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) log_debug(1 << CXGBI_DBG_PDU_RX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) "skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) skb, offset, offloaded, bytes_read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) /* no transfer - just have caller flush queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) return bytes_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) case ISCSI_TCP_SKB_DONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) skb, offset, offloaded);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) * pdus should always fit in the skb and we should get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) * segment done notification.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) case ISCSI_TCP_SEGMENT_DONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) log_debug(1 << CXGBI_DBG_PDU_RX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) "skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) skb, offset, offloaded, bytes_read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) return bytes_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) pr_info("skb 0x%p, off %u, %d, invalid status %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) skb, offset, offloaded, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) skb_read_pdu_bhs(struct cxgbi_sock *csk, struct iscsi_conn *conn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) log_debug(1 << CXGBI_DBG_PDU_RX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) conn, skb, skb->len, cxgbi_skcb_flags(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) if (conn->hdrdgst_en &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) if (cxgbi_skcb_test_flag(skb, SKCBF_RX_ISCSI_COMPL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA_DDPD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) /* If the completion flag is set and the data was placed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) * directly into host memory, update task->exp_datasn to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) * the datasn in the completion iSCSI hdr: the T6 adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) * generates a completion only for the last pdu of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) * sequence.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) itt_t itt = ((struct iscsi_data *)skb->data)->itt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) struct iscsi_task *task = iscsi_itt_to_ctask(conn, itt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) u32 data_sn = be32_to_cpu(((struct iscsi_data *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) skb->data)->datasn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) if (task && task->sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) struct iscsi_tcp_task *tcp_task = task->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) tcp_task->exp_datasn = data_sn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) err = read_pdu_skb(conn, skb, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) if (likely(err >= 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) struct iscsi_hdr *hdr = (struct iscsi_hdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) u8 opcode = hdr->opcode & ISCSI_OPCODE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) if (unlikely(opcode == ISCSI_OP_LOGOUT_RSP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) cxgbi_sock_set_flag(csk, CTPF_LOGOUT_RSP_RCVD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) struct sk_buff *skb, unsigned int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) bool offloaded = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) log_debug(1 << CXGBI_DBG_PDU_RX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) conn, skb, skb->len, cxgbi_skcb_flags(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) if (conn->datadgst_en &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) cxgbi_skcb_test_flag(lskb, SKCBF_RX_DCRC_ERR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) conn, lskb, cxgbi_skcb_flags(lskb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) /* coalesced, add header digest length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) if (lskb == skb && conn->hdrdgst_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) offset += ISCSI_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) if (cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA_DDPD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) offloaded = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) if (opcode == ISCSI_OP_SCSI_DATA_IN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) log_debug(1 << CXGBI_DBG_PDU_RX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) "skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) skb, opcode, ntohl(tcp_conn->in.hdr->itt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) tcp_conn->in.datalen, offloaded ? "is" : "not");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) return read_pdu_skb(conn, skb, offset, offloaded);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
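/*
 * Return receive-window credits to the hardware once the bytes copied to
 * the application reach the device's rx credit threshold, or when the
 * advertised window is close to being used up.
 */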
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) struct cxgbi_device *cdev = csk->cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) int must_send;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) u32 credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) log_debug(1 << CXGBI_DBG_PDU_RX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) "csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) csk, csk->state, csk->flags, csk->tid, csk->copied_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) csk->rcv_wup, cdev->rx_credit_thres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) csk->rcv_win);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) if (!cdev->rx_credit_thres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) if (csk->state != CTP_ESTABLISHED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) credits = csk->copied_seq - csk->rcv_wup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) if (unlikely(!credits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) return;
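	/* force an update if the unreturned bytes leave < 16KB of window */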
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) must_send = credits + 16384 >= csk->rcv_win;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) if (must_send || credits >= cdev->rx_credit_thres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
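/*
 * Drain csk->receive_queue: for each skb whose PDU status has arrived,
 * hand the BHS and its data (coalesced in the same skb or queued as a
 * separate skb) to the iscsi layer, then account the bytes consumed and
 * return RX credits for them.
 */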
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) struct cxgbi_device *cdev = csk->cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) struct iscsi_conn *conn = csk->user_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) unsigned int read = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) log_debug(1 << CXGBI_DBG_PDU_RX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) "csk 0x%p, conn 0x%p.\n", csk, conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if (unlikely(!conn || conn->suspend_rx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) log_debug(1 << CXGBI_DBG_PDU_RX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) csk, conn, conn ? conn->id : 0xFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) conn ? conn->suspend_rx : 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) while (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) skb = skb_peek(&csk->receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) if (!skb ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) if (skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) log_debug(1 << CXGBI_DBG_PDU_RX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) "skb 0x%p, NOT ready 0x%lx.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) skb, cxgbi_skcb_flags(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) __skb_unlink(skb, &csk->receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) read += cxgbi_skcb_rx_pdulen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) log_debug(1 << CXGBI_DBG_PDU_RX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) "csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) csk, skb, skb->len, cxgbi_skcb_flags(skb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) cxgbi_skcb_rx_pdulen(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
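		/*
		 * A coalesced skb carries both the BHS and the PDU data;
		 * otherwise any data was queued as a separate skb right
		 * behind this one.
		 */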
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) err = skb_read_pdu_bhs(csk, conn, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) "f 0x%lx, plen %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) csk, skb, skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) cxgbi_skcb_flags(skb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) cxgbi_skcb_rx_pdulen(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) goto skb_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) err = skb_read_pdu_data(conn, skb, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) err + cdev->skb_rx_extra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) "f 0x%lx, plen %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) csk, skb, skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) cxgbi_skcb_flags(skb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) cxgbi_skcb_rx_pdulen(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) err = skb_read_pdu_bhs(csk, conn, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) pr_err("bhs, csk 0x%p, skb 0x%p,%u, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) "f 0x%lx, plen %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) csk, skb, skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) cxgbi_skcb_flags(skb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) cxgbi_skcb_rx_pdulen(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) goto skb_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) struct sk_buff *dskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) dskb = skb_peek(&csk->receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) if (!dskb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) " plen %u, NO data.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) csk, skb, skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) cxgbi_skcb_flags(skb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) cxgbi_skcb_rx_pdulen(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) goto skb_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) __skb_unlink(dskb, &csk->receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) err = skb_read_pdu_data(conn, skb, dskb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) pr_err("data, csk 0x%p, skb 0x%p,%u, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) "f 0x%lx, plen %u, dskb 0x%p,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) "%u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) csk, skb, skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) cxgbi_skcb_flags(skb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) cxgbi_skcb_rx_pdulen(skb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) dskb, dskb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) __kfree_skb(dskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) err = skb_read_pdu_data(conn, skb, skb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) skb_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) __kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) if (read) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) csk->copied_seq += read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) csk_return_rx_credits(csk, read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) conn->rxdata_octets += read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) csk, conn, err, read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
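/*
 * Find the scatterlist entry that contains byte 'offset' of the buffer;
 * return it in *sgp along with the remaining offset within that entry,
 * or -EFAULT if the offset lies beyond the scatterlist.
 */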
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) unsigned int offset, unsigned int *off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) struct scatterlist **sgp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) for_each_sg(sgl, sg, sgcnt, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) if (offset < sg->length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) *off = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) *sgp = sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) offset -= sg->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
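/*
 * Cover 'dlen' bytes of the scatterlist, starting at sg/sgoffset, with
 * up to 'frag_max' page fragments, merging pieces that are contiguous
 * within the same page.  Returns the number of fragments used, or
 * -EINVAL with *dlimit set to the bytes that did fit when frag_max is
 * exceeded.
 */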
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) unsigned int dlen, struct page_frag *frags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) int frag_max, u32 *dlimit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) unsigned int datalen = dlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) unsigned int sglen = sg->length - sgoffset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) struct page *page = sg_page(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) unsigned int copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) if (!sglen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) sg = sg_next(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (!sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) pr_warn("sg %d NULL, len %u/%u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) i, datalen, dlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) sgoffset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) sglen = sg->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) page = sg_page(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) copy = min(datalen, sglen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) if (i && page == frags[i - 1].page &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) sgoffset + sg->offset ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) frags[i - 1].offset + frags[i - 1].size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) frags[i - 1].size += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) if (i >= frag_max) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) pr_warn("too many pages %u, dlen %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) frag_max, dlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) *dlimit = dlen - datalen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) frags[i].page = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) frags[i].offset = sg->offset + sgoffset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) frags[i].size = copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) datalen -= copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) sgoffset += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) sglen -= copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) } while (datalen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
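/*
 * Inspect the command's scatterlist once per task: if there is no SGL,
 * or any of its pages cannot safely be referenced (page_count() == 0),
 * flag the task so the payload is copied into the skb headroom instead
 * of being attached as page fragments.
 */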
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) static void cxgbi_task_data_sgl_check(struct iscsi_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) struct scsi_cmnd *sc = task->sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) struct scatterlist *sg, *sgl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) u32 sgcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) tdata->flags = CXGBI_TASK_SGL_CHECKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) if (!sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) scmd_get_params(sc, &sgl, &sgcnt, &tdata->dlen, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) if (!sgl || !sgcnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) tdata->flags |= CXGBI_TASK_SGL_COPY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) for_each_sg(sgl, sg, sgcnt, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) if (page_count(sg_page(sg)) < 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) tdata->flags |= CXGBI_TASK_SGL_COPY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)
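/*
 * Map [offset, offset + count) of the command's scatterlist into
 * tdata->frags (at most MAX_SKB_FRAGS entries) and record the window
 * in tdata; *dlimit reports how much data fit if the fragment limit
 * is hit.
 */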
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) cxgbi_task_data_sgl_read(struct iscsi_task *task, u32 offset, u32 count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) u32 *dlimit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) struct scsi_cmnd *sc = task->sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) struct scatterlist *sgl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) u32 dlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) u32 sgcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) if (!sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) scmd_get_params(sc, &sgl, &sgcnt, &dlen, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) if (!sgl || !sgcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) err = sgl_seek_offset(sgl, sgcnt, offset, &tdata->sgoffset, &sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) pr_warn("sgl %u, bad offset %u/%u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) sgcnt, offset, tdata->dlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) err = sgl_read_to_frags(sg, tdata->sgoffset, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) tdata->frags, MAX_SKB_FRAGS, dlimit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) log_debug(1 << CXGBI_DBG_ISCSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) "sgl max limit, sgl %u, offset %u, %u/%u, dlimit %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) sgcnt, offset, count, tdata->dlen, *dlimit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) tdata->offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) tdata->count = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) tdata->nr_frags = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) tdata->total_count = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) tdata->total_offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) "%s: offset %u, count %u,\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) "err %u, total_count %u, total_offset %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) __func__, offset, count, err, tdata->total_count, tdata->total_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
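/*
 * Allocate the tx skb for a PDU, reserving headroom for the adapter's
 * tx header, an optional ISO header and, when the payload cannot be
 * attached as page fragments, the payload itself.  For write-direction
 * data this may also widen conn->max_xmit_dlength so that one skb
 * carries several PDUs (ISO); the original value is kept in
 * tdata->max_xmit_dlength and restored on error or in init_pdu.
 */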
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) struct iscsi_conn *conn = task->conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) struct iscsi_session *session = task->conn->session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) struct cxgbi_conn *cconn = tcp_conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) struct cxgbi_device *cdev = cconn->chba->cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) struct cxgbi_sock *csk = cconn->cep ? cconn->cep->csk : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) struct iscsi_tcp_task *tcp_task = task->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) struct scsi_cmnd *sc = task->sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) u32 headroom = SKB_TX_ISCSI_PDU_HEADER_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) u32 max_txdata_len = conn->max_xmit_dlength;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) u32 iso_tx_rsvd = 0, local_iso_info = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) u32 last_tdata_offset, last_tdata_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) if (!tcp_task) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) pr_err("task 0x%p, tcp_task 0x%p, tdata 0x%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) task, tcp_task, tdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) if (!csk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) pr_err("task 0x%p, csk gone.\n", task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) return -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) op &= ISCSI_OPCODE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) tcp_task->dd_data = tdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) task->hdr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) last_tdata_count = tdata->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) last_tdata_offset = tdata->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) if ((op == ISCSI_OP_SCSI_DATA_OUT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) ((op == ISCSI_OP_SCSI_CMD) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) (sc->sc_data_direction == DMA_TO_DEVICE))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) u32 remaining_data_tosend, dlimit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) u32 max_pdu_size, max_num_pdu, num_pdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) u32 count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) /* Preserve conn->max_xmit_dlength because it can get updated to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) * ISO data size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) if (task->state == ISCSI_TASK_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) tdata->max_xmit_dlength = conn->max_xmit_dlength;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) if (!tdata->offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) cxgbi_task_data_sgl_check(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) remaining_data_tosend =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) tdata->dlen - tdata->offset - tdata->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) recalculate_sgl:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) max_txdata_len = tdata->max_xmit_dlength;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) "tdata->dlen %u, remaining to send %u "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) "conn->max_xmit_dlength %u, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) "tdata->max_xmit_dlength %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) tdata->dlen, remaining_data_tosend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) conn->max_xmit_dlength, tdata->max_xmit_dlength);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
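		/*
		 * Use ISO (one skb carrying several PDUs) only when the
		 * device provides an ISO tx header, ISO is not disabled on
		 * this connection, more than one full PDU remains and the
		 * remainder is 4-byte aligned; not for a SCSI command when
		 * InitialR2T is enabled.
		 */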
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) if (cdev->skb_iso_txhdr && !csk->disable_iso &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) (remaining_data_tosend > tdata->max_xmit_dlength) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) !(remaining_data_tosend % 4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) u32 max_iso_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) if ((op == ISCSI_OP_SCSI_CMD) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) session->initial_r2t_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) goto no_iso;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) max_pdu_size = tdata->max_xmit_dlength +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) ISCSI_PDU_NONPAYLOAD_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) max_iso_data = rounddown(CXGBI_MAX_ISO_DATA_IN_SKB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) csk->advmss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) max_num_pdu = max_iso_data / max_pdu_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) num_pdu = (remaining_data_tosend +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) tdata->max_xmit_dlength - 1) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) tdata->max_xmit_dlength;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) if (num_pdu > max_num_pdu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) num_pdu = max_num_pdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) conn->max_xmit_dlength = tdata->max_xmit_dlength * num_pdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) max_txdata_len = conn->max_xmit_dlength;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) iso_tx_rsvd = cdev->skb_iso_txhdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) local_iso_info = sizeof(struct cxgbi_iso_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) "max_pdu_size %u, max_num_pdu %u, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) "max_txdata %u, num_pdu %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) max_pdu_size, max_num_pdu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) max_txdata_len, num_pdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) no_iso:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) count = min_t(u32, max_txdata_len, remaining_data_tosend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) err = cxgbi_task_data_sgl_read(task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) tdata->offset + tdata->count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) count, &dlimit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) if (unlikely(err < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) log_debug(1 << CXGBI_DBG_ISCSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) "task 0x%p, tcp_task 0x%p, tdata 0x%p, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) "sgl err %d, count %u, dlimit %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) task, tcp_task, tdata, err, count, dlimit);
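			/*
			 * Ran out of fragment slots: dlimit is how much data
			 * was mapped, so shrink this send to a whole number
			 * of PDUs within that limit and redo the calculation.
			 */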
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) if (dlimit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) remaining_data_tosend =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) rounddown(dlimit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) tdata->max_xmit_dlength);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) if (!remaining_data_tosend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) remaining_data_tosend = dlimit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) dlimit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) conn->max_xmit_dlength = remaining_data_tosend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) goto recalculate_sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) pr_err("task 0x%p, tcp_task 0x%p, tdata 0x%p, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) "sgl err %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) task, tcp_task, tdata, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) goto ret_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)
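		/*
		 * If the payload must be copied, or needs more fragments
		 * than an skb can carry, reserve headroom for a linear
		 * copy of the whole payload.
		 */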
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) if ((tdata->flags & CXGBI_TASK_SGL_COPY) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) (tdata->nr_frags > MAX_SKB_FRAGS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) headroom += conn->max_xmit_dlength;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) tdata->skb = alloc_skb(local_iso_info + cdev->skb_tx_rsvd +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) iso_tx_rsvd + headroom, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) if (!tdata->skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) tdata->count = last_tdata_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) tdata->offset = last_tdata_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) goto ret_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) skb_reserve(tdata->skb, local_iso_info + cdev->skb_tx_rsvd +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) iso_tx_rsvd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) if (task->sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) task->hdr = (struct iscsi_hdr *)tdata->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) if (!task->hdr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) __kfree_skb(tdata->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) tdata->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) if (iso_tx_rsvd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) cxgbi_skcb_set_flag(tdata->skb, SKCBF_TX_ISO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) /* data_out uses scsi_cmd's itt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) if (op != ISCSI_OP_SCSI_DATA_OUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) task_reserve_itt(task, &task->hdr->itt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) "task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) task, op, tdata->skb, cdev->skb_tx_rsvd, headroom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) conn->max_xmit_dlength, be32_to_cpu(task->hdr->itt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) ret_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) conn->max_xmit_dlength = tdata->max_xmit_dlength;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
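/*
 * Fill in the cxgbi_iso_info block at skb->head describing how the
 * payload is to be split into num_pdu PDUs (per-PDU size, total length,
 * burst size, first/last slice, R2T segment offset).  The BHS dlength
 * is clamped to a single PDU and the R2T DataSN is advanced for the
 * extra PDUs the adapter will generate.
 */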
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) cxgbi_prep_iso_info(struct iscsi_task *task, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) u32 count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) struct cxgbi_iso_info *iso_info = (struct cxgbi_iso_info *)skb->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) struct iscsi_r2t_info *r2t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) struct iscsi_conn *conn = task->conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) struct iscsi_session *session = conn->session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) struct iscsi_tcp_task *tcp_task = task->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) u32 burst_size = 0, r2t_dlength = 0, dlength;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) u32 max_pdu_len = tdata->max_xmit_dlength;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) u32 segment_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) u32 num_pdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) if (unlikely(!cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) memset(iso_info, 0, sizeof(struct cxgbi_iso_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) if (task->hdr->opcode == ISCSI_OP_SCSI_CMD && session->imm_data_en) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) iso_info->flags |= CXGBI_ISO_INFO_IMM_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) burst_size = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) dlength = ntoh24(task->hdr->dlength);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) dlength = min(dlength, max_pdu_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) hton24(task->hdr->dlength, dlength);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) num_pdu = (count + max_pdu_len - 1) / max_pdu_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) if (iscsi_task_has_unsol_data(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) r2t = &task->unsol_r2t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) r2t = tcp_task->r2t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) if (r2t) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) "count %u, tdata->count %u, num_pdu %u,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) "task->hdr_len %u, r2t->data_length %u, r2t->sent %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) count, tdata->count, num_pdu, task->hdr_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) r2t->data_length, r2t->sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) r2t_dlength = r2t->data_length - r2t->sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) segment_offset = r2t->sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) r2t->datasn += num_pdu - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) if (!r2t || !r2t->sent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) iso_info->flags |= CXGBI_ISO_INFO_FSLICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) if (task->hdr->flags & ISCSI_FLAG_CMD_FINAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) iso_info->flags |= CXGBI_ISO_INFO_LSLICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) task->hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) iso_info->op = task->hdr->opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) iso_info->ahs = task->hdr->hlength;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) iso_info->num_pdu = num_pdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) iso_info->mpdu = max_pdu_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) iso_info->burst_size = (burst_size + r2t_dlength) >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) iso_info->len = count + task->hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) iso_info->segment_offset = segment_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) cxgbi_skcb_tx_iscsi_hdrlen(skb) = task->hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
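/*
 * Record the ULP2 iSCSI tx mode and digest submode in the skb cb:
 * bit 0 enables header CRC, bit 1 enables data CRC.
 */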
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) if (hcrc || dcrc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) u8 submode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) if (hcrc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) submode |= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) if (dcrc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) submode |= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) cxgbi_skcb_tx_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) cxgbi_skcb_tx_ulp_mode(skb) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)
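/* shared page backing the pad bytes appended to PDU payloads */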
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) static struct page *rsvd_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
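/*
 * Attach the PDU payload to the tx skb built by cxgbi_conn_alloc_pdu:
 * copy it into the headroom when page fragments cannot be used,
 * otherwise attach the pages as skb frags; pad the payload to a 4-byte
 * boundary and prepare the ISO info when it spans more than one PDU.
 */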
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) struct iscsi_conn *conn = task->conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) struct iscsi_tcp_task *tcp_task = task->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) struct scsi_cmnd *sc = task->sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) u32 expected_count, expected_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) u32 datalen = count, dlimit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) u32 i, padlen = iscsi_padding(count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) struct page *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) if (!tcp_task || (tcp_task->dd_data != tdata)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) task, task->sc, tcp_task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) tcp_task ? tcp_task->dd_data : NULL, tdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) skb = tdata->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) "task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) be32_to_cpu(task->cmdsn), be32_to_cpu(task->hdr->itt), offset, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) skb_put(skb, task->hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) if (!count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) tdata->count = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) tdata->offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) tdata->nr_frags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) tdata->total_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) tdata->total_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) if (tdata->max_xmit_dlength)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) conn->max_xmit_dlength = tdata->max_xmit_dlength;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) cxgbi_skcb_clear_flag(skb, SKCBF_TX_ISO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) "data->total_count %u, tdata->total_offset %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) tdata->total_count, tdata->total_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) expected_count = tdata->total_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) expected_offset = tdata->total_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) if ((count != expected_count) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) (offset != expected_offset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) err = cxgbi_task_data_sgl_read(task, offset, count, &dlimit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) "dlimit %u, sgl err %d.\n", task, task->sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) tcp_task, tcp_task ? tcp_task->dd_data : NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) tdata, dlimit, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) /* Restore original value of conn->max_xmit_dlength because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) * it can get updated to ISO data size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) conn->max_xmit_dlength = tdata->max_xmit_dlength;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) if (sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) struct page_frag *frag = tdata->frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
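		/*
		 * Copy into the headroom when the SGL was flagged for copy,
		 * when there are more fragments than an skb can hold, or
		 * when no frag slot would be left for the pad page.
		 */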
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) if ((tdata->flags & CXGBI_TASK_SGL_COPY) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) (tdata->nr_frags > MAX_SKB_FRAGS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) (padlen && (tdata->nr_frags ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) MAX_SKB_FRAGS))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) char *dst = skb->data + task->hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) /* data fits in the skb's headroom */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) for (i = 0; i < tdata->nr_frags; i++, frag++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) char *src = kmap_atomic(frag->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) memcpy(dst, src + frag->offset, frag->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) dst += frag->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) kunmap_atomic(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) if (padlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) memset(dst, 0, padlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) padlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) skb_put(skb, count + padlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) for (i = 0; i < tdata->nr_frags; i++, frag++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) get_page(frag->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) skb_fill_page_desc(skb, i, frag->page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) frag->offset, frag->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) skb->len += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) skb->data_len += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) skb->truesize += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) pg = virt_to_head_page(task->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) get_page(pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) skb_fill_page_desc(skb, 0, pg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) task->data - (char *)page_address(pg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) skb->len += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) skb->data_len += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) skb->truesize += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) if (padlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) get_page(rsvd_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) rsvd_page, 0, padlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) skb->data_len += padlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) skb->truesize += padlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) skb->len += padlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) if (likely(count > tdata->max_xmit_dlength))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) cxgbi_prep_iso_info(task, skb, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) cxgbi_skcb_clear_flag(skb, SKCBF_TX_ISO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272)
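/*
 * Validate the connection state and the skb geometry (headroom for the
 * adapter headers, fragment count below SKB_WR_LIST_SIZE), queue the
 * skb on the socket's write queue and advance write_seq by the on-wire
 * length, including the ULP digest overhead and, for ISO, the per-PDU
 * overhead and extra BHS copies.
 */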
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) static int cxgbi_sock_tx_queue_up(struct cxgbi_sock *csk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) struct cxgbi_device *cdev = csk->cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) struct cxgbi_iso_info *iso_cpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) u32 frags = skb_shinfo(skb)->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) u32 extra_len, num_pdu, hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) u32 iso_tx_rsvd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) if (csk->state != CTP_ESTABLISHED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) log_debug(1 << CXGBI_DBG_PDU_TX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) "csk 0x%p,%u,0x%lx,%u, EAGAIN.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) csk, csk->state, csk->flags, csk->tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) return -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) if (csk->err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) log_debug(1 << CXGBI_DBG_PDU_TX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) "csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) csk, csk->state, csk->flags, csk->tid, csk->err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) return -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) if ((cdev->flags & CXGBI_FLAG_DEV_T3) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) before((csk->snd_win + csk->snd_una), csk->write_seq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) log_debug(1 << CXGBI_DBG_PDU_TX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) "csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) csk, csk->state, csk->flags, csk->tid, csk->write_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) csk->snd_una, csk->snd_win);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) if (cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) iso_tx_rsvd = cdev->skb_iso_txhdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) if (unlikely(skb_headroom(skb) < (cdev->skb_tx_rsvd + iso_tx_rsvd))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) pr_err("csk 0x%p, skb head %u < %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) csk, skb_headroom(skb), cdev->skb_tx_rsvd + iso_tx_rsvd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) if (skb->len != skb->data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) frags++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) if (frags >= SKB_WR_LIST_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) pr_err("csk 0x%p, frags %u, %u,%u >%lu.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) csk, skb_shinfo(skb)->nr_frags, skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) skb->data_len, SKB_WR_LIST_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) skb_reset_transport_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) cxgbi_sock_skb_entail(csk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) extra_len = cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_ISO))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) iso_cpl = (struct cxgbi_iso_info *)skb->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) num_pdu = iso_cpl->num_pdu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) hdr_len = cxgbi_skcb_tx_iscsi_hdrlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) extra_len = (cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb)) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) num_pdu) + (hdr_len * (num_pdu - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) csk->write_seq += (skb->len + extra_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341)
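/*
 * Queue the skb under the socket lock and kick the tx path; returns the
 * skb length queued or a negative errno.
 */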
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) static int cxgbi_sock_send_skb(struct cxgbi_sock *csk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) struct cxgbi_device *cdev = csk->cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) int len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) spin_lock_bh(&csk->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) err = cxgbi_sock_tx_queue_up(csk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) spin_unlock_bh(&csk->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) if (likely(skb_queue_len(&csk->write_queue)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) cdev->csk_push_tx_frames(csk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) spin_unlock_bh(&csk->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360)
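/*
 * Hand the prepared skb to the offload socket.  On success account the
 * PDU (plus digests) in txdata_octets and re-enable ISO once a 1 second
 * back-off has passed; on -EAGAIN/-ENOBUFS keep the skb for a retry and
 * disable ISO after repeated credit shortages; any other error drops
 * the skb and fails the connection.
 */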
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) struct cxgbi_conn *cconn = tcp_conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) struct iscsi_tcp_task *tcp_task = task->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) struct cxgbi_sock *csk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) u32 pdulen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) u32 datalen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) if (!tcp_task || (tcp_task->dd_data != tdata)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) pr_err("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) task, task->sc, tcp_task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) tcp_task ? tcp_task->dd_data : NULL, tdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) skb = tdata->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) "task 0x%p, skb NULL.\n", task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) if (cconn && cconn->cep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) csk = cconn->cep->csk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) if (!csk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) "task 0x%p, csk gone.\n", task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) return -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) tdata->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) datalen = skb->data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) /* if ppods are written via the offload queue, send them before the data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) struct cxgbi_ppm *ppm = csk->cdev->cdev2ppm(csk->cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) if (csk->cdev->csk_ddp_set_map(ppm, csk, ttinfo) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) pr_err("task 0x%p, ppod writing using ofldq failed.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) /* continue anyway; the data will arrive via the free list (fl) instead of DDP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) if (!task->sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) memcpy(skb->data, task->hdr, SKB_TX_ISCSI_PDU_HEADER_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) err = cxgbi_sock_send_skb(csk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) if (err > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) pdulen += err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) log_debug(1 << CXGBI_DBG_PDU_TX, "task 0x%p,0x%p, rv %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) task, task->sc, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) if (task->conn->hdrdgst_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) pdulen += ISCSI_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) if (datalen && task->conn->datadgst_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) pdulen += ISCSI_DIGEST_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) task->conn->txdata_octets += pdulen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428)
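		/*
		 * If ISO was disabled earlier due to lack of tx credits,
		 * re-enable it once at least a second (HZ jiffies) has
		 * passed since it was turned off.
		 */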
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) if (unlikely(cxgbi_is_iso_config(csk) && cxgbi_is_iso_disabled(csk))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) if (time_after(jiffies, csk->prev_iso_ts + HZ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) csk->disable_iso = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) csk->prev_iso_ts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) log_debug(1 << CXGBI_DBG_PDU_TX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) "enable iso: csk 0x%p\n", csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) if (err == -EAGAIN || err == -ENOBUFS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) log_debug(1 << CXGBI_DBG_PDU_TX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) task, skb, skb->len, skb->data_len, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) /* reset skb to send when we are called again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) tdata->skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447)
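		/*
		 * If the socket keeps running out of tx credits while ISO is
		 * enabled, disable ISO and note the time so the success path
		 * above can re-enable it later.
		 */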
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) if (cxgbi_is_iso_config(csk) && !cxgbi_is_iso_disabled(csk) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) (csk->no_tx_credits++ >= 2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) csk->disable_iso = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) csk->prev_iso_ts = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) log_debug(1 << CXGBI_DBG_PDU_TX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) "disable iso:csk 0x%p, ts:%lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) csk, csk->prev_iso_ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) task->itt, skb, skb->len, skb->data_len, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) __kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) void cxgbi_cleanup_task(struct iscsi_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) struct iscsi_tcp_task *tcp_task = task->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) if (!tcp_task || (tcp_task->dd_data != tdata)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) pr_info("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) task, task->sc, tcp_task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) tcp_task ? tcp_task->dd_data : NULL, tdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) log_debug(1 << CXGBI_DBG_ISCSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) "task 0x%p, skb 0x%p, itt 0x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) task, tdata->skb, task->hdr_itt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) tcp_task->dd_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) if (!task->sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) kfree(task->hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) task->hdr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) /* never reached the xmit task callout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) if (tdata->skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) __kfree_skb(tdata->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) tdata->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) task_release_itt(task, task->hdr_itt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) memset(tdata, 0, sizeof(*tdata));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) iscsi_tcp_cleanup_task(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) EXPORT_SYMBOL_GPL(cxgbi_cleanup_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) struct iscsi_stats *stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) struct iscsi_conn *conn = cls_conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) stats->txdata_octets = conn->txdata_octets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) stats->rxdata_octets = conn->rxdata_octets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) stats->dataout_pdus = conn->dataout_pdus_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) stats->datain_pdus = conn->datain_pdus_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) stats->r2t_pdus = conn->r2t_pdus_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) stats->digest_err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) stats->timeout_err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) stats->custom_length = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) strcpy(stats->custom[0].desc, "eh_abort_cnt");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) stats->custom[0].value = conn->eh_abort_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526)
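/*
 * Cap the transmit PDU data length: take the larger of 512 * MAX_SKB_FRAGS
 * and the maximum linear skb payload left after the device's tx headroom
 * reservation, clamp it to the device tx_max_size, then round the result
 * with cxgbi_align_pdu_size().
 */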
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) struct cxgbi_conn *cconn = tcp_conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) struct cxgbi_device *cdev = cconn->chba->cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) unsigned int max_def = 512 * MAX_SKB_FRAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) unsigned int max = max(max_def, headroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) max = min(cconn->chba->cdev->tx_max_size, max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) if (conn->max_xmit_dlength)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) conn->max_xmit_dlength = min(conn->max_xmit_dlength, max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) conn->max_xmit_dlength = max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) cxgbi_align_pdu_size(conn->max_xmit_dlength);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
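/*
 * Derive the receive PDU data length: reject a requested
 * MaxRecvDataSegmentLength larger than the device rx_max_size, otherwise
 * use the smaller of the two, aligned with cxgbi_align_pdu_size().
 */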
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) struct cxgbi_conn *cconn = tcp_conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) unsigned int max = cconn->chba->cdev->rx_max_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) cxgbi_align_pdu_size(max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) if (conn->max_recv_dlength) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) if (conn->max_recv_dlength > max) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) pr_err("MaxRecvDataSegmentLength %u > %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) conn->max_recv_dlength, max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) conn->max_recv_dlength = min(conn->max_recv_dlength, max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) cxgbi_align_pdu_size(conn->max_recv_dlength);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) conn->max_recv_dlength = max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) enum iscsi_param param, char *buf, int buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) struct iscsi_conn *conn = cls_conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) struct cxgbi_conn *cconn = tcp_conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) struct cxgbi_sock *csk = cconn->cep->csk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) log_debug(1 << CXGBI_DBG_ISCSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) "cls_conn 0x%p, param %d, buf(%d) %s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) cls_conn, param, buflen, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) switch (param) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) case ISCSI_PARAM_HDRDGST_EN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) err = iscsi_set_param(cls_conn, param, buf, buflen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) if (!err && conn->hdrdgst_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) conn->hdrdgst_en,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) conn->datadgst_en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) case ISCSI_PARAM_DATADGST_EN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) err = iscsi_set_param(cls_conn, param, buf, buflen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) if (!err && conn->datadgst_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) conn->hdrdgst_en,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) conn->datadgst_en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) case ISCSI_PARAM_MAX_R2T:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) return iscsi_tcp_set_max_r2t(conn, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) case ISCSI_PARAM_MAX_RECV_DLENGTH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) err = iscsi_set_param(cls_conn, param, buf, buflen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) err = cxgbi_conn_max_recv_dlength(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) case ISCSI_PARAM_MAX_XMIT_DLENGTH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) err = iscsi_set_param(cls_conn, param, buf, buflen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) err = cxgbi_conn_max_xmit_dlength(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) return iscsi_set_param(cls_conn, param, buf, buflen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) struct cxgbi_endpoint *cep = ep->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) struct cxgbi_sock *csk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) log_debug(1 << CXGBI_DBG_ISCSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) "ep 0x%p, param %d.\n", ep, param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) switch (param) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) case ISCSI_PARAM_CONN_PORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) case ISCSI_PARAM_CONN_ADDRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) if (!cep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) return -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) csk = cep->csk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) if (!csk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) return -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) return iscsi_conn_get_addr_param((struct sockaddr_storage *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) &csk->daddr, param, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) EXPORT_SYMBOL_GPL(cxgbi_get_ep_param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) struct iscsi_cls_conn *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) struct iscsi_cls_conn *cls_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) struct iscsi_conn *conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) struct iscsi_tcp_conn *tcp_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) struct cxgbi_conn *cconn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) if (!cls_conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) conn = cls_conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) tcp_conn = conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) cconn = tcp_conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) cconn->iconn = conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) log_debug(1 << CXGBI_DBG_ISCSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) "cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) return cls_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) EXPORT_SYMBOL_GPL(cxgbi_create_conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) struct iscsi_cls_conn *cls_conn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) u64 transport_eph, int is_leading)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) struct iscsi_conn *conn = cls_conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) struct cxgbi_conn *cconn = tcp_conn->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) struct cxgbi_ppm *ppm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) struct iscsi_endpoint *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) struct cxgbi_endpoint *cep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) struct cxgbi_sock *csk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) ep = iscsi_lookup_endpoint(transport_eph);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) if (!ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) /* setup ddp pagesize */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) cep = ep->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) csk = cep->csk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) ppm = csk->cdev->cdev2ppm(csk->cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) ppm->tformat.pgsz_idx_dflt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) /* calculate the tag idx bits needed for this conn based on cmds_max */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) write_lock_bh(&csk->callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) csk->user_data = conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) cconn->chba = cep->chba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) cconn->cep = cep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) cep->cconn = cconn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) write_unlock_bh(&csk->callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) cxgbi_conn_max_xmit_dlength(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) cxgbi_conn_max_recv_dlength(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) log_debug(1 << CXGBI_DBG_ISCSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) "cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) cls_session, cls_conn, ep, cconn, csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) /* init recv engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) iscsi_tcp_hdr_recv_prep(tcp_conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) EXPORT_SYMBOL_GPL(cxgbi_bind_conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) u16 cmds_max, u16 qdepth,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) u32 initial_cmdsn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) struct cxgbi_endpoint *cep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) struct cxgbi_hba *chba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) struct Scsi_Host *shost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) struct iscsi_cls_session *cls_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) struct iscsi_session *session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) if (!ep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) pr_err("missing endpoint.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) cep = ep->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) chba = cep->chba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) shost = chba->shost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) BUG_ON(chba != iscsi_host_priv(shost));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) cls_session = iscsi_session_setup(chba->cdev->itp, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) cmds_max, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) sizeof(struct iscsi_tcp_task) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) sizeof(struct cxgbi_task_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) initial_cmdsn, ISCSI_MAX_TARGET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) if (!cls_session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) session = cls_session->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) if (iscsi_tcp_r2tpool_alloc(session))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) goto remove_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) log_debug(1 << CXGBI_DBG_ISCSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) "ep 0x%p, cls sess 0x%p.\n", ep, cls_session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) return cls_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) remove_session:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) iscsi_session_teardown(cls_session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) EXPORT_SYMBOL_GPL(cxgbi_create_session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) void cxgbi_destroy_session(struct iscsi_cls_session *cls_session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) log_debug(1 << CXGBI_DBG_ISCSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) "cls sess 0x%p.\n", cls_session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) iscsi_tcp_r2tpool_free(cls_session->dd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) iscsi_session_teardown(cls_session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) EXPORT_SYMBOL_GPL(cxgbi_destroy_session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) char *buf, int buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) struct cxgbi_hba *chba = iscsi_host_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) if (!chba->ndev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) shost_printk(KERN_ERR, shost, "Could not set host param. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) "netdev for host not set.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) log_debug(1 << CXGBI_DBG_ISCSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) "shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) shost, chba, chba->ndev->name, param, buflen, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) switch (param) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) case ISCSI_HOST_PARAM_IPADDRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) __be32 addr = in_aton(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) log_debug(1 << CXGBI_DBG_ISCSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) "hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) cxgbi_set_iscsi_ipv4(chba, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) case ISCSI_HOST_PARAM_HWADDRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) case ISCSI_HOST_PARAM_NETDEV_NAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) return iscsi_host_set_param(shost, param, buf, buflen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) EXPORT_SYMBOL_GPL(cxgbi_set_host_param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) struct cxgbi_hba *chba = iscsi_host_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) int len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) if (!chba->ndev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) shost_printk(KERN_ERR, shost, "Could not get host param. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) "netdev for host not set.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) log_debug(1 << CXGBI_DBG_ISCSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) "shost 0x%p, hba 0x%p,%s, param %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) shost, chba, chba->ndev->name, param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) switch (param) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) case ISCSI_HOST_PARAM_HWADDRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) len = sysfs_format_mac(buf, chba->ndev->dev_addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) case ISCSI_HOST_PARAM_NETDEV_NAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) len = sprintf(buf, "%s\n", chba->ndev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) case ISCSI_HOST_PARAM_IPADDRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) struct cxgbi_sock *csk = find_sock_on_port(chba->cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) chba->port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) if (csk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) len = sprintf(buf, "%pIS",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) (struct sockaddr *)&csk->saddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) log_debug(1 << CXGBI_DBG_ISCSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) "hba %s, addr %s.\n", chba->ndev->name, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) return iscsi_host_get_param(shost, param, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) EXPORT_SYMBOL_GPL(cxgbi_get_host_param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) struct sockaddr *dst_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) int non_blocking)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) struct iscsi_endpoint *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) struct cxgbi_endpoint *cep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) struct cxgbi_hba *hba = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) struct cxgbi_sock *csk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) int ifindex = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) "shost 0x%p, non_blocking %d, dst_addr 0x%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) shost, non_blocking, dst_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) if (shost) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) hba = iscsi_host_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) if (!hba) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) pr_info("shost 0x%p, priv NULL.\n", shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) check_route:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) if (dst_addr->sa_family == AF_INET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) csk = cxgbi_check_route(dst_addr, ifindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) #if IS_ENABLED(CONFIG_IPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) } else if (dst_addr->sa_family == AF_INET6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) csk = cxgbi_check_route6(dst_addr, ifindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) pr_info("address family 0x%x NOT supported.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) dst_addr->sa_family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) err = -EAFNOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) return (struct iscsi_endpoint *)ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) if (IS_ERR(csk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) return (struct iscsi_endpoint *)csk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) cxgbi_sock_get(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)
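	/*
	 * If the connect was issued against a specific host but the route
	 * lookup resolved to a different HBA, redo the lookup bound to that
	 * host's ifindex once; if it still lands elsewhere, fail the connect.
	 */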
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) if (!hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) hba = csk->cdev->hbas[csk->port_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) else if (hba != csk->cdev->hbas[csk->port_id]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) if (ifindex != hba->ndev->ifindex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) cxgbi_sock_put(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) cxgbi_sock_closed(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) ifindex = hba->ndev->ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) goto check_route;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) pr_info("Could not connect through requested host %u, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) "hba 0x%p != 0x%p (%u).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) shost->host_no, hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) csk->cdev->hbas[csk->port_id], csk->port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) err = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) goto release_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) err = sock_get_port(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) goto release_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) cxgbi_sock_set_state(csk, CTP_CONNECTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) err = csk->cdev->csk_init_act_open(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) goto release_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) if (cxgbi_sock_is_closing(csk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) err = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) pr_info("csk 0x%p is closing.\n", csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) goto release_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) ep = iscsi_create_endpoint(sizeof(*cep));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) if (!ep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) pr_info("iscsi alloc ep, OOM.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) goto release_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) cep = ep->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) cep->csk = csk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) cep->chba = hba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) "ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) ep, cep, csk, hba, hba->ndev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) return ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) release_conn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) cxgbi_sock_put(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) cxgbi_sock_closed(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) EXPORT_SYMBOL_GPL(cxgbi_ep_connect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) struct cxgbi_endpoint *cep = ep->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) struct cxgbi_sock *csk = cep->csk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) if (!cxgbi_sock_is_established(csk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) EXPORT_SYMBOL_GPL(cxgbi_ep_poll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) void cxgbi_ep_disconnect(struct iscsi_endpoint *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) struct cxgbi_endpoint *cep = ep->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) struct cxgbi_conn *cconn = cep->cconn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) struct cxgbi_sock *csk = cep->csk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) "ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) ep, cep, cconn, csk, csk->state, csk->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) if (cconn && cconn->iconn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) iscsi_suspend_tx(cconn->iconn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) write_lock_bh(&csk->callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) cep->csk->user_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) cconn->cep = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) write_unlock_bh(&csk->callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) iscsi_destroy_endpoint(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) if (likely(csk->state >= CTP_ESTABLISHED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) need_active_close(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) cxgbi_sock_closed(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) cxgbi_sock_put(csk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) int cxgbi_iscsi_init(struct iscsi_transport *itp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) struct scsi_transport_template **stt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) *stt = iscsi_register_transport(itp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) if (*stt == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) pr_err("unable to register %s transport 0x%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) itp->name, itp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) log_debug(1 << CXGBI_DBG_ISCSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) "%s, registered iscsi transport 0x%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) itp->name, itp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) EXPORT_SYMBOL_GPL(cxgbi_iscsi_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) void cxgbi_iscsi_cleanup(struct iscsi_transport *itp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) struct scsi_transport_template **stt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) if (*stt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) log_debug(1 << CXGBI_DBG_ISCSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) "de-register transport 0x%p, %s, stt 0x%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) itp, itp->name, *stt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) *stt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) iscsi_unregister_transport(itp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) umode_t cxgbi_attr_is_visible(int param_type, int param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) switch (param_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) case ISCSI_HOST_PARAM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) switch (param) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) case ISCSI_HOST_PARAM_NETDEV_NAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) case ISCSI_HOST_PARAM_HWADDRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) case ISCSI_HOST_PARAM_IPADDRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) case ISCSI_HOST_PARAM_INITIATOR_NAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) return S_IRUGO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) case ISCSI_PARAM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) switch (param) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) case ISCSI_PARAM_MAX_RECV_DLENGTH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) case ISCSI_PARAM_MAX_XMIT_DLENGTH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) case ISCSI_PARAM_HDRDGST_EN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) case ISCSI_PARAM_DATADGST_EN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) case ISCSI_PARAM_CONN_ADDRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) case ISCSI_PARAM_CONN_PORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) case ISCSI_PARAM_EXP_STATSN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) case ISCSI_PARAM_PERSISTENT_ADDRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) case ISCSI_PARAM_PERSISTENT_PORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) case ISCSI_PARAM_PING_TMO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) case ISCSI_PARAM_RECV_TMO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) case ISCSI_PARAM_INITIAL_R2T_EN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) case ISCSI_PARAM_MAX_R2T:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) case ISCSI_PARAM_IMM_DATA_EN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) case ISCSI_PARAM_FIRST_BURST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) case ISCSI_PARAM_MAX_BURST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) case ISCSI_PARAM_PDU_INORDER_EN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) case ISCSI_PARAM_DATASEQ_INORDER_EN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) case ISCSI_PARAM_ERL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) case ISCSI_PARAM_TARGET_NAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) case ISCSI_PARAM_TPGT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) case ISCSI_PARAM_USERNAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) case ISCSI_PARAM_PASSWORD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) case ISCSI_PARAM_USERNAME_IN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) case ISCSI_PARAM_PASSWORD_IN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) case ISCSI_PARAM_FAST_ABORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) case ISCSI_PARAM_ABORT_TMO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) case ISCSI_PARAM_LU_RESET_TMO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) case ISCSI_PARAM_TGT_RESET_TMO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) case ISCSI_PARAM_IFACE_NAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) case ISCSI_PARAM_INITIATOR_NAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) return S_IRUGO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072)
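/*
 * Module init: check at build time that cxgbi's per-skb control block fits
 * in skb->cb, then allocate the zeroed rsvd_page the library keeps around
 * (released again in the module exit path below).
 */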
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) static int __init libcxgbi_init_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) pr_info("%s", version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) BUILD_BUG_ON(sizeof_field(struct sk_buff, cb) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) sizeof(struct cxgbi_skb_cb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) rsvd_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) if (!rsvd_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) static void __exit libcxgbi_exit_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) cxgbi_device_unregister_all(0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) put_page(rsvd_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) module_init(libcxgbi_init_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) module_exit(libcxgbi_exit_module);